source
stringlengths
3
92
c
stringlengths
26
2.25M
vector_batched.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "seq_mv.h"

/* Data layout used by every routine in this file: the k vectors passed as
 * x (resp. y, z) are stored back to back in the data array of the FIRST
 * vector, i.e. column j occupies entries [j*size, (j+1)*size) of
 * hypre_VectorData(x[0]).  All columns share the size of x[0]. */

/*--------------------------------------------------------------------------
 * hypre_SeqVectorMassAxpy8
 *
 * y += sum_{j=0}^{k-1} alpha[j] * x_j, with the column loop unrolled by 8
 * so each sweep over y reads 8 columns.  The trailing (k mod 8) columns
 * are folded into one combined sweep (single rounded sum per entry, same
 * grouping as the unrolled body).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassAxpy8( HYPRE_Complex *alpha,
                          hypre_Vector **x,
                          hypre_Vector  *y,
                          HYPRE_Int      k )
{
   HYPRE_Complex *x_data = hypre_VectorData(x[0]);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x[0]);
   HYPRE_Int      i, j, jstart, restk;

   restk = k - (k / 8 * 8); /* k mod 8 */

   if (k > 7)
   {
      for (j = 0; j < k - 7; j += 8)
      {
         jstart = j * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < size; i++)
         {
            y_data[i] += alpha[j]     * x_data[jstart            + i] +
                         alpha[j + 1] * x_data[jstart +     size + i] +
                         alpha[j + 2] * x_data[jstart + 2 * size + i] +
                         alpha[j + 3] * x_data[jstart + 3 * size + i] +
                         alpha[j + 4] * x_data[jstart + 4 * size + i] +
                         alpha[j + 5] * x_data[jstart + 5 * size + i] +
                         alpha[j + 6] * x_data[jstart + 6 * size + i] +
                         alpha[j + 7] * x_data[jstart + 7 * size + i];
         }
      }
   }

   /* Remainder: 1..7 columns combined into one update per entry,
    * replacing seven duplicated restk branches. */
   if (restk > 0)
   {
      jstart = (k - restk) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         HYPRE_Complex sum = 0.0;
         HYPRE_Int     jj;
         for (jj = 0; jj < restk; jj++)
         {
            sum += alpha[k - restk + jj] * x_data[jstart + jj * size + i];
         }
         y_data[i] += sum;
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorMassAxpy4
 *
 * Same as hypre_SeqVectorMassAxpy8 but with a 4-wide column unroll.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassAxpy4( HYPRE_Complex *alpha,
                          hypre_Vector **x,
                          hypre_Vector  *y,
                          HYPRE_Int      k )
{
   HYPRE_Complex *x_data = hypre_VectorData(x[0]);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x[0]);
   HYPRE_Int      i, j, jstart, restk;

   restk = k - (k / 4 * 4); /* k mod 4 */

   if (k > 3)
   {
      for (j = 0; j < k - 3; j += 4)
      {
         jstart = j * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < size; i++)
         {
            y_data[i] += alpha[j]     * x_data[jstart            + i] +
                         alpha[j + 1] * x_data[jstart +     size + i] +
                         alpha[j + 2] * x_data[jstart + 2 * size + i] +
                         alpha[j + 3] * x_data[jstart + 3 * size + i];
         }
      }
   }

   /* Remainder: 1..3 columns combined into one update per entry. */
   if (restk > 0)
   {
      jstart = (k - restk) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         HYPRE_Complex sum = 0.0;
         HYPRE_Int     jj;
         for (jj = 0; jj < restk; jj++)
         {
            sum += alpha[k - restk + jj] * x_data[jstart + jj * size + i];
         }
         y_data[i] += sum;
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorMassAxpy
 *
 * Dispatcher: unroll == 8 or 4 selects the unrolled kernels above;
 * any other value runs the straightforward one-column-at-a-time loop.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassAxpy( HYPRE_Complex *alpha,
                         hypre_Vector **x,
                         hypre_Vector  *y,
                         HYPRE_Int      k,
                         HYPRE_Int      unroll )
{
   HYPRE_Complex *x_data = hypre_VectorData(x[0]);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x[0]);
   HYPRE_Int      i, j, jstart;

   if (unroll == 8)
   {
      return hypre_SeqVectorMassAxpy8(alpha, x, y, k);
   }
   if (unroll == 4)
   {
      return hypre_SeqVectorMassAxpy4(alpha, x, y, k);
   }

   for (j = 0; j < k; j++)
   {
      jstart = j * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         y_data[i] += alpha[j] * x_data[jstart + i];
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorMassInnerProd8
 *
 * result[j] = sum_i conj(y_j[i]) * x[i] for j = 0..k-1 (result entries are
 * OVERWRITTEN).  Columns are processed 8 at a time so x is read once per
 * 8 inner products; the trailing (k mod 8) columns get one sweep each.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassInnerProd8( hypre_Vector  *x,
                               hypre_Vector **y,
                               HYPRE_Int      k,
                               HYPRE_Real    *result )
{
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y[0]);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      i, j, restk;
   HYPRE_Real     res1, res2, res3, res4, res5, res6, res7, res8;
   HYPRE_Int      jstart, jstart1, jstart2, jstart3, jstart4, jstart5, jstart6, jstart7;

   restk = k - (k / 8 * 8); /* k mod 8 */

   if (k > 7)
   {
      for (j = 0; j < k - 7; j += 8)
      {
         res1 = 0; res2 = 0; res3 = 0; res4 = 0;
         res5 = 0; res6 = 0; res7 = 0; res8 = 0;
         jstart  = j * size;
         jstart1 = jstart  + size;
         jstart2 = jstart1 + size;
         jstart3 = jstart2 + size;
         jstart4 = jstart3 + size;
         jstart5 = jstart4 + size;
         jstart6 = jstart5 + size;
         jstart7 = jstart6 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4,res5,res6,res7,res8) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < size; i++)
         {
            res1 += hypre_conj(y_data[jstart  + i]) * x_data[i];
            res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
            res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];
            res4 += hypre_conj(y_data[jstart3 + i]) * x_data[i];
            res5 += hypre_conj(y_data[jstart4 + i]) * x_data[i];
            res6 += hypre_conj(y_data[jstart5 + i]) * x_data[i];
            res7 += hypre_conj(y_data[jstart6 + i]) * x_data[i];
            res8 += hypre_conj(y_data[jstart7 + i]) * x_data[i];
         }
         result[j]     = res1;
         result[j + 1] = res2;
         result[j + 2] = res3;
         result[j + 3] = res4;
         result[j + 4] = res5;
         result[j + 5] = res6;
         result[j + 6] = res7;
         result[j + 7] = res8;
      }
   }

   /* Remaining k mod 8 columns, one sweep each (replaces seven duplicated
    * restk branches; per-column accumulation order is unchanged). */
   for (j = k - restk; j < k; j++)
   {
      res1 = 0;
      jstart = j * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res1 += hypre_conj(y_data[jstart + i]) * x_data[i];
      }
      result[j] = res1;
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorMassInnerProd4
 *
 * Same as hypre_SeqVectorMassInnerProd8 but with a 4-wide column unroll.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassInnerProd4( hypre_Vector  *x,
                               hypre_Vector **y,
                               HYPRE_Int      k,
                               HYPRE_Real    *result )
{
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y[0]);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      i, j, restk;
   HYPRE_Real     res1, res2, res3, res4;
   HYPRE_Int      jstart, jstart1, jstart2, jstart3;

   restk = k - (k / 4 * 4); /* k mod 4 */

   if (k > 3)
   {
      for (j = 0; j < k - 3; j += 4)
      {
         res1 = 0; res2 = 0; res3 = 0; res4 = 0;
         jstart  = j * size;
         jstart1 = jstart  + size;
         jstart2 = jstart1 + size;
         jstart3 = jstart2 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < size; i++)
         {
            res1 += hypre_conj(y_data[jstart  + i]) * x_data[i];
            res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
            res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];
            res4 += hypre_conj(y_data[jstart3 + i]) * x_data[i];
         }
         result[j]     = res1;
         result[j + 1] = res2;
         result[j + 2] = res3;
         result[j + 3] = res4;
      }
   }

   /* Remaining k mod 4 columns, one sweep each. */
   for (j = k - restk; j < k; j++)
   {
      res1 = 0;
      jstart = j * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res1 += hypre_conj(y_data[jstart + i]) * x_data[i];
      }
      result[j] = res1;
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorMassDotpTwo8
 *
 * For j = 0..k-1: result_x[j] = <z_j, x> and result_y[j] = <z_j, y>
 * (result entries are OVERWRITTEN).  Columns processed 8 at a time so x, y
 * and each z_j are read once per block of 8; trailing columns get one
 * sweep each.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassDotpTwo8( hypre_Vector  *x,
                             hypre_Vector  *y,
                             hypre_Vector **z,
                             HYPRE_Int      k,
                             HYPRE_Real    *result_x,
                             HYPRE_Real    *result_y )
{
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Complex *z_data = hypre_VectorData(z[0]);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      i, j, restk;
   HYPRE_Real     res_x1, res_x2, res_x3, res_x4, res_x5, res_x6, res_x7, res_x8;
   HYPRE_Real     res_y1, res_y2, res_y3, res_y4, res_y5, res_y6, res_y7, res_y8;
   HYPRE_Int      jstart, jstart1, jstart2, jstart3, jstart4, jstart5, jstart6, jstart7;

   restk = k - (k / 8 * 8); /* k mod 8 */

   if (k > 7)
   {
      for (j = 0; j < k - 7; j += 8)
      {
         res_x1 = 0; res_x2 = 0; res_x3 = 0; res_x4 = 0;
         res_x5 = 0; res_x6 = 0; res_x7 = 0; res_x8 = 0;
         res_y1 = 0; res_y2 = 0; res_y3 = 0; res_y4 = 0;
         res_y5 = 0; res_y6 = 0; res_y7 = 0; res_y8 = 0;
         jstart  = j * size;
         jstart1 = jstart  + size;
         jstart2 = jstart1 + size;
         jstart3 = jstart2 + size;
         jstart4 = jstart3 + size;
         jstart5 = jstart4 + size;
         jstart6 = jstart5 + size;
         jstart7 = jstart6 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_x5,res_x6,res_x7,res_x8,res_y1,res_y2,res_y3,res_y4,res_y5,res_y6,res_y7,res_y8) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < size; i++)
         {
            res_x1 += hypre_conj(z_data[jstart  + i]) * x_data[i];
            res_y1 += hypre_conj(z_data[jstart  + i]) * y_data[i];
            res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
            res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
            res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];
            res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];
            res_x4 += hypre_conj(z_data[jstart3 + i]) * x_data[i];
            res_y4 += hypre_conj(z_data[jstart3 + i]) * y_data[i];
            res_x5 += hypre_conj(z_data[jstart4 + i]) * x_data[i];
            res_y5 += hypre_conj(z_data[jstart4 + i]) * y_data[i];
            res_x6 += hypre_conj(z_data[jstart5 + i]) * x_data[i];
            res_y6 += hypre_conj(z_data[jstart5 + i]) * y_data[i];
            res_x7 += hypre_conj(z_data[jstart6 + i]) * x_data[i];
            res_y7 += hypre_conj(z_data[jstart6 + i]) * y_data[i];
            res_x8 += hypre_conj(z_data[jstart7 + i]) * x_data[i];
            res_y8 += hypre_conj(z_data[jstart7 + i]) * y_data[i];
         }
         result_x[j]     = res_x1;
         result_x[j + 1] = res_x2;
         result_x[j + 2] = res_x3;
         result_x[j + 3] = res_x4;
         result_x[j + 4] = res_x5;
         result_x[j + 5] = res_x6;
         result_x[j + 6] = res_x7;
         result_x[j + 7] = res_x8;
         result_y[j]     = res_y1;
         result_y[j + 1] = res_y2;
         result_y[j + 2] = res_y3;
         result_y[j + 3] = res_y4;
         result_y[j + 4] = res_y5;
         result_y[j + 5] = res_y6;
         result_y[j + 6] = res_y7;
         result_y[j + 7] = res_y8;
      }
   }

   /* Remaining k mod 8 columns, one sweep each. */
   for (j = k - restk; j < k; j++)
   {
      res_x1 = 0;
      res_y1 = 0;
      jstart = j * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_y1) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];
         res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];
      }
      result_x[j] = res_x1;
      result_y[j] = res_y1;
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorMassDotpTwo4
 *
 * Same as hypre_SeqVectorMassDotpTwo8 but with a 4-wide column unroll.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassDotpTwo4( hypre_Vector  *x,
                             hypre_Vector  *y,
                             hypre_Vector **z,
                             HYPRE_Int      k,
                             HYPRE_Real    *result_x,
                             HYPRE_Real    *result_y )
{
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Complex *z_data = hypre_VectorData(z[0]);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      i, j, restk;
   HYPRE_Real     res_x1, res_x2, res_x3, res_x4;
   HYPRE_Real     res_y1, res_y2, res_y3, res_y4;
   HYPRE_Int      jstart, jstart1, jstart2, jstart3;

   restk = k - (k / 4 * 4); /* k mod 4 */

   if (k > 3)
   {
      for (j = 0; j < k - 3; j += 4)
      {
         res_x1 = 0; res_x2 = 0; res_x3 = 0; res_x4 = 0;
         res_y1 = 0; res_y2 = 0; res_y3 = 0; res_y4 = 0;
         jstart  = j * size;
         jstart1 = jstart  + size;
         jstart2 = jstart1 + size;
         jstart3 = jstart2 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_y1,res_y2,res_y3,res_y4) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < size; i++)
         {
            res_x1 += hypre_conj(z_data[jstart  + i]) * x_data[i];
            res_y1 += hypre_conj(z_data[jstart  + i]) * y_data[i];
            res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
            res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
            res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];
            res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];
            res_x4 += hypre_conj(z_data[jstart3 + i]) * x_data[i];
            res_y4 += hypre_conj(z_data[jstart3 + i]) * y_data[i];
         }
         result_x[j]     = res_x1;
         result_x[j + 1] = res_x2;
         result_x[j + 2] = res_x3;
         result_x[j + 3] = res_x4;
         result_y[j]     = res_y1;
         result_y[j + 1] = res_y2;
         result_y[j + 2] = res_y3;
         result_y[j + 3] = res_y4;
      }
   }

   /* Remaining k mod 4 columns, one sweep each. */
   for (j = k - restk; j < k; j++)
   {
      res_x1 = 0;
      res_y1 = 0;
      jstart = j * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_y1) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];
         res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];
      }
      result_x[j] = res_x1;
      result_y[j] = res_y1;
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorMassInnerProd
 *
 * Dispatcher: unroll == 8 or 4 selects the unrolled kernels; otherwise
 * compute result[j] = <y_j, x> one column at a time (result overwritten).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassInnerProd( hypre_Vector  *x,
                              hypre_Vector **y,
                              HYPRE_Int      k,
                              HYPRE_Int      unroll,
                              HYPRE_Real    *result )
{
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y[0]);
   HYPRE_Real     res;
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      i, j, jstart;

   if (unroll == 8)
   {
      return hypre_SeqVectorMassInnerProd8(x, y, k, result);
   }
   if (unroll == 4)
   {
      return hypre_SeqVectorMassInnerProd4(x, y, k, result);
   }

   for (j = 0; j < k; j++)
   {
      res = 0;
      jstart = j * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res += hypre_conj(y_data[jstart + i]) * x_data[i];
      }
      result[j] = res;
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorMassDotpTwo
 *
 * Dispatcher: unroll == 8 or 4 selects the unrolled kernels; otherwise
 * compute both dot products one column at a time.
 *
 * NOTE: unlike hypre_SeqVectorMassInnerProd, the generic path below SEEDS
 * the accumulation with the incoming result_x[j] / result_y[j] values
 * (accumulate-into semantics); callers must initialize the result arrays.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassDotpTwo( hypre_Vector  *x,
                            hypre_Vector  *y,
                            hypre_Vector **z,
                            HYPRE_Int      k,
                            HYPRE_Int      unroll,
                            HYPRE_Real    *result_x,
                            HYPRE_Real    *result_y )
{
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Complex *z_data = hypre_VectorData(z[0]);
   HYPRE_Real     res_x, res_y;
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      i, j, jstart;

   if (unroll == 8)
   {
      return hypre_SeqVectorMassDotpTwo8(x, y, z, k, result_x, result_y);
   }
   if (unroll == 4)
   {
      return hypre_SeqVectorMassDotpTwo4(x, y, z, k, result_x, result_y);
   }

   for (j = 0; j < k; j++)
   {
      res_x = result_x[j]; /* accumulate into existing value */
      res_y = result_y[j];
      jstart = j * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x,res_y) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         res_x += hypre_conj(z_data[jstart + i]) * x_data[i];
         res_y += hypre_conj(z_data[jstart + i]) * y_data[i];
      }
      result_x[j] = res_x;
      result_y[j] = res_y;
   }

   return hypre_error_flag;
}
bug_proxy_task_dep_waiting.c
// RUN: %libomp-compile -lpthread && %libomp-run
// REQUIRES: openmp-4.5
// The runtime currently does not get dependency information from GCC.
// UNSUPPORTED: gcc

#include <stdio.h>
#include <omp.h>
#include <pthread.h>
#include "omp_my_sleep.h"

/*
 An explicit task can have a dependency on a target task. If it is not
 directly satisfied, the runtime should not wait but resume execution.

 Regression test: emulates, by hand, the compiler-generated calls into the
 LLVM OpenMP runtime for a "#pragma omp target nowait depend(out: dep)"
 region implemented as a proxy task completed from a plain pthread.
*/

// Compiler-generated code (emulation) -- minimal re-declarations of the
// runtime's internal ABI types; layouts must match the runtime's kmp.h.
typedef long kmp_intptr_t;
typedef int kmp_int32;
typedef char bool;

typedef struct ident {
    kmp_int32 reserved_1;   /**<  might be used in Fortran; see above  */
    kmp_int32 flags;        /**<  also f.flags; KMP_IDENT_xxx flags; KMP_IDENT_KMPC identifies this union member  */
    kmp_int32 reserved_2;   /**<  not really used in Fortran any more; see above */
#if USE_ITT_BUILD
    /*  but currently used for storing region-specific ITT */
    /*  contextual information. */
#endif /* USE_ITT_BUILD */
    kmp_int32 reserved_3;   /**< source[4] in Fortran, do not use for C++  */
    char const *psource;    /**< String describing the source location.
                            The string is composed of semi-colon separated fields which describe the source file,
                            the function and a pair of line numbers that delimit the construct.
                             */
} ident_t;

// One entry of a task's dependence list (matches the runtime's
// kmp_depend_info layout: address, length, in/out flag bits).
typedef struct kmp_depend_info {
     kmp_intptr_t base_addr;
     size_t len;
     struct {
         bool in:1;
         bool out:1;
     } flags;
} kmp_depend_info_t;

struct kmp_task;
typedef kmp_int32 (* kmp_routine_entry_t)( kmp_int32, struct kmp_task * );

typedef struct kmp_task {                   /* GEH: Shouldn't this be aligned somehow? */
    void *              shareds;            /**< pointer to block of pointers to shared vars   */
    kmp_routine_entry_t routine;            /**< pointer to routine to call for executing task */
    kmp_int32           part_id;            /**< part id for the task                          */
} kmp_task_t;

// Runtime entry points used below; resolved from the libomp being tested.
#ifdef __cplusplus
extern "C" {
#endif
kmp_int32  __kmpc_global_thread_num ( ident_t * );
kmp_task_t* __kmpc_omp_task_alloc( ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags,
               size_t sizeof_kmp_task_t, size_t sizeof_shareds,
               kmp_routine_entry_t task_entry );
void __kmpc_proxy_task_completed_ooo ( kmp_task_t *ptask );
kmp_int32 __kmpc_omp_task_with_deps ( ident_t *loc_ref, kmp_int32 gtid,
               kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
               kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list );
kmp_int32 __kmpc_omp_task( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task );
#ifdef __cplusplus
}
#endif

// Thread body emulating the asynchronous "target" region: sleep, then tell
// the runtime (from outside any OpenMP thread) that the proxy task is done.
void *target(void *task)
{
    my_sleep( 0.1 );
    __kmpc_proxy_task_completed_ooo((kmp_task_t*) task);
    return NULL;
}

pthread_t target_thread;

// User's code
// Task entry: hands the proxy task off to a plain pthread and returns
// immediately, leaving the task incomplete until the thread signals it.
int task_entry(kmp_int32 gtid, kmp_task_t *task)
{
    pthread_create(&target_thread, NULL, &target, task);
    return 0;
}

int main()
{
    int dep;

    /*
     * Corresponds to:
     *   #pragma omp target nowait depend(out: dep)
     *   {
     *      my_sleep( 0.1 );
     *   }
     */
    kmp_depend_info_t dep_info;
    dep_info.base_addr = (long) &dep;
    dep_info.len = sizeof(int);
    // out = inout per spec and runtime expects this
    dep_info.flags.in = 1;
    dep_info.flags.out = 1;

    kmp_int32 gtid = __kmpc_global_thread_num(NULL);
    // flags = 17: presumably TIED (0x1) | PROXY (0x10) -- TODO confirm
    // against kmp.h's kmp_tasking_flags_t bit layout.
    kmp_task_t *proxy_task = __kmpc_omp_task_alloc(NULL,gtid,17,sizeof(kmp_task_t),0,&task_entry);
    __kmpc_omp_task_with_deps(NULL,gtid,proxy_task,1,&dep_info,0,NULL);

    int first_task_finished = 0;
    #pragma omp task shared(first_task_finished) depend(inout: dep)
    {
        first_task_finished = 1;
    }

    int second_task_finished = 0;
    #pragma omp task shared(second_task_finished) depend(in: dep)
    {
        second_task_finished = 1;
    }

    // check that execution has been resumed and the runtime has not waited
    // for the dependencies to be satisfied.
    int error = (first_task_finished == 1);
    error += (second_task_finished == 1);

    #pragma omp taskwait

    // by now all tasks should have finished
    error += (first_task_finished != 1);
    error += (second_task_finished != 1);

    return error;
}
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 4; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=Nt-1;t1++) { lbp=ceild(t1+1,2); ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(1,ceild(8*t2-Nz+9,4)),t1+1);t3<=min(floord(4*Nt+Ny-9,4),floord(4*t1+Ny-1,4));t3++) { for (t4=max(max(ceild(t1-126,128),ceild(8*t2-Nz-499,512)),ceild(4*t3-Ny-499,512));t4<=min(min(floord(4*Nt+Nx-9,512),floord(4*t1+Nx-1,512)),floord(4*t3+Nx-9,512));t4++) { for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),t3-1),128*t4+126);t5++) { for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) { lbv=max(512*t4,4*t5+4); ubv=min(512*t4+511,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 
2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
relu_hcl_arm.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#ifndef _RELU_KERNEL_ARM_H_
#define _RELU_KERNEL_ARM_H_

#include <arm_neon.h>

#include "../../../../../include/tengine_ir.h"
#include "../../../../op/relu_param.h"

/*
 * ReLU / leaky-ReLU forward pass over an fp32 tensor, parallelized over
 * channels with OpenMP and vectorized 4-wide with NEON where available.
 *
 *   negative_slope == 0 : dst = max(src, 0)           (plain ReLU)
 *   negative_slope != 0 : dst = src < 0 ? src*slope : src  (leaky ReLU)
 *
 * Assumes NCHW layout with per-channel stride h*w.
 * NOTE(review): height is read from output_tensor while width/channels come
 * from input_tensor — presumably the two tensors share a shape; confirm
 * against the callers. Always returns 0 (no failure paths).
 */
static int perf_relu_fp32(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, float negative_slope,
                          int num_thread)
{
    int w = input_tensor->dims[3];
    int h = output_tensor->dims[2];
    int channels = input_tensor->dims[1];
    int size = h * w;
    int c_step = h * w;   // elements per channel (same as size; kept separate by convention)

    float* input_data = input_tensor->data;
    float* out_data = output_tensor->data;

    if (negative_slope == 0)
    {
        // Plain ReLU path: one channel per OpenMP iteration.
#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            float* src = input_data + c_step * q;
            float* dst = out_data + c_step * q;

#if __ARM_NEON
            // nn = number of full 4-float vectors; remain = scalar tail.
            int nn = size >> 2;
            int remain = size - (nn << 2);
#else
            int remain = size;
#endif // __ARM_NEON

#if __ARM_NEON
            float32x4_t _zero = vdupq_n_f32(0.f);
            for (; nn > 0; nn--)
            {
                // dst[0..3] = max(src[0..3], 0)
                float32x4_t _p = vld1q_f32(src);
                _p = vmaxq_f32(_p, _zero);
                vst1q_f32(dst, _p);

                src += 4;
                dst += 4;
            }
#endif
            // Scalar tail (or the whole channel when NEON is unavailable).
            for (; remain > 0; remain--)
            {
                if (src[0] < 0)
                    dst[0] = 0;
                else
                    dst[0] = src[0];

                src++;
                dst++;
            }
        }
    }
    else
    {
        // Leaky-ReLU path: negatives are scaled by negative_slope.
#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            float* src = input_data + c_step * q;
            float* dst = out_data + c_step * q;

#if __ARM_NEON
            int nn = size >> 2;
            int remain = size - (nn << 2);
#else
            int remain = size;
#endif // __ARM_NEON

#if __ARM_NEON
            float32x4_t _zero = vdupq_n_f32(0.f);
            float32x4_t _slope = vdupq_n_f32(negative_slope);
            for (; nn > 0; nn--)
            {
                // Lane-select: where p <= 0 take p*slope, else p.
                float32x4_t _p = vld1q_f32(src);
                uint32x4_t _lemask = vcleq_f32(_p, _zero);
                float32x4_t _ps = vmulq_f32(_p, _slope);
                _p = vbslq_f32(_lemask, _ps, _p);
                vst1q_f32(dst, _p);

                src += 4;
                dst += 4;
            }
#endif
            for (; remain > 0; remain--)
            {
                if (src[0] < 0)
                    dst[0] = src[0] * negative_slope;
                else
                    dst[0] = src[0];

                src++;
                dst++;
            }
        }
    }

    return 0;
}

#endif
GB_unaryop__lnot_int16_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// (Only comments have been added below; the macro token sequences are consumed
// verbatim by GB_unaryop_transpose.c and must not change.)

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int16_uint64
// op(A') function:  GB_tran__lnot_int16_uint64

// C type:   int16_t
// A type:   uint64_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: logical NOT of the (nonzero?) test
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting: uint64_t -> int16_t
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise pass over the dense value arrays; no sparsity pattern needed.
GrB_Info GB_unop__lnot_int16_uint64
(
    int16_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unaryop_transpose.c, which expands the
// GB_* macros defined above (phase 2 of 2: values are written).
GrB_Info GB_tran__lnot_int16_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__abs_uint16_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// (Only comments have been added below; the macro token sequences are consumed
// verbatim by GB_unaryop_transpose.c and must not change.)

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_uint16_fp32
// op(A') function:  GB_tran__abs_uint16_fp32

// C type:   uint16_t
// A type:   float
// cast:     uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16)
// unaryop:  cij = aij

#define GB_ATYPE \
    float

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: identity — ABS is absorbed by the unsigned cast below
#define GB_OP(z, x) \
    z = x ;

// casting: saturating float -> uint16_t via GB_CAST_UNSIGNED
#define GB_CASTING(z, aij) \
    uint16_t z ; GB_CAST_UNSIGNED(z,aij,16) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT16 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise pass over the dense value arrays; no sparsity pattern needed.
GrB_Info GB_unop__abs_uint16_fp32
(
    uint16_t *Cx,       // Cx and Ax may be aliased
    float *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unaryop_transpose.c, which expands the
// GB_* macros defined above (phase 2 of 2: values are written).
GrB_Info GB_tran__abs_uint16_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Efficient_RANSAC.h
// Copyright (c) 2015 INRIA Sophia-Antipolis (France). // All rights reserved. // // This file is part of CGAL (www.cgal.org). // // $URL$ // $Id$ // SPDX-License-Identifier: GPL-3.0-or-later OR LicenseRef-Commercial // // // Author(s) : Sven Oesau, Yannick Verdie, Clément Jamin, Pierre Alliez // #ifndef CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H #define CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H #include <CGAL/license/Shape_detection.h> #include <CGAL/Random.h> #include <CGAL/Shape_detection/Efficient_RANSAC/Octree.h> #include <CGAL/Shape_detection/Efficient_RANSAC/Shape_base.h> #include <CGAL/Shape_detection/Efficient_RANSAC/Plane.h> // for octree ------------------------------ #include <boost/iterator/filter_iterator.hpp> #include <CGAL/bounding_box.h> #include <CGAL/Iterator_range.h> //---------- #include <vector> #include <cmath> #include <limits> #include <fstream> #include <sstream> #include <functional> // boost -------------- #include <CGAL/boost/iterator/counting_iterator.hpp> #include <boost/shared_ptr.hpp> #include <boost/make_shared.hpp> //--------------------- namespace CGAL { namespace Shape_detection { /*! \ingroup PkgShapeDetectionRANSAC \brief Shape detection algorithm based on the RANSAC method. Given a point set in 3D space with unoriented normals, sampled on surfaces, this class enables to detect subsets of connected points lying on the surface of primitive shapes. Each input point is assigned to either none or at most one detected primitive shape. The implementation follows \cgalCite{schnabel2007efficient}. \tparam Traits must be a model of `EfficientRANSACTraits`. 
*/ template<class Traits> class Efficient_RANSAC { public: /// \cond SKIP_IN_MANUAL struct Filter_unassigned_points { Filter_unassigned_points() : m_shape_index(dummy) {} Filter_unassigned_points(const std::vector<int> &shapeIndex) : m_shape_index(shapeIndex) {} bool operator()(std::size_t x) { if (x < m_shape_index.size()) return m_shape_index[x] == -1; else return true; // to prevent infinite incrementing } const std::vector<int> &m_shape_index; std::vector<int> dummy; }; typedef boost::filter_iterator<Filter_unassigned_points, boost::counting_iterator<std::size_t, boost::use_default, std::ptrdiff_t> > Point_index_iterator; ///< iterator for indices of points. /// \endcond /// \name Types /// @{ /// \cond SKIP_IN_MANUAL typedef typename Traits::Input_range::iterator Input_iterator; typedef typename Traits::FT FT; ///< number type. typedef typename Traits::Point_3 Point; ///< point type. typedef typename Traits::Vector_3 Vector; ///< vector type. /// \endcond typedef typename Traits::Input_range Input_range; ///< Model of the concept `Range` with random access iterators, providing input points and normals /// through the following two property maps. typedef typename Traits::Point_map Point_map; ///< Property map to access the location of an input point. typedef typename Traits::Normal_map Normal_map; ///< Property map to access the unoriented normal of an input point. typedef Shape_base<Traits> Shape; ///< Shape type. typedef Plane<Traits> Plane_shape; ///< %Plane shape type. #ifdef DOXYGEN_RUNNING typedef unspecified_type Shape_range; ///< `Iterator_range` with a bidirectional constant iterator type with value type `boost::shared_ptr<Shape>`. typedef unspecified_type Plane_range; ///< `Iterator_range` with a bidirectional constant iterator type with value type `boost::shared_ptr<Plane_shape>`. 
#else struct Shape_range : public Iterator_range< typename std::vector<boost::shared_ptr<Shape> >::const_iterator> { typedef Iterator_range< typename std::vector<boost::shared_ptr<Shape> >::const_iterator> Base; Shape_range(boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > extracted_shapes) : Base(make_range(extracted_shapes->begin(), extracted_shapes->end())), m_extracted_shapes(extracted_shapes) {} private: boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > m_extracted_shapes; // keeps a reference to the shape vector }; struct Plane_range : public Iterator_range< typename std::vector<boost::shared_ptr<Plane_shape> >::const_iterator> { typedef Iterator_range< typename std::vector<boost::shared_ptr<Plane_shape> >::const_iterator> Base; Plane_range(boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > > extracted_shapes) : Base(make_range(extracted_shapes->begin(), extracted_shapes->end())), m_extracted_shapes(extracted_shapes) {} private: boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > > m_extracted_shapes; // keeps a reference to the shape vector }; #endif #ifdef DOXYGEN_RUNNING typedef unspecified_type Point_index_range; ///< `Iterator_range` with a bidirectional iterator with value type `std::size_t` /// as indices into the input data that has not been assigned to a shape. /// As this range class has no `size()` method, the method /// `Efficient_RANSAC::number_of_unassigned_points()` is provided. #else typedef Iterator_range<Point_index_iterator> Point_index_range; #endif /// @} /// \name Parameters /// @{ /*! Parameters for the shape detection algorithm. They are explained in detail in Section \ref Shape_detection_RANSACParameters of the User Manual. */ struct Parameters { Parameters() : probability((FT) 0.01), min_points((std::numeric_limits<std::size_t>::max)()), epsilon(-1), normal_threshold((FT) 0.9), cluster_epsilon(-1) {} /*! Probability to control search endurance. %Default value is 0.05. 
A lower probability provides a higher reliability and determinism at the cost of longer running time due to a higher search endurance. It must belong to the interval [0, 1]. */ FT probability; /*! Minimum number of points in a shape. %Default value is 1% of total number of input points. It must belong to the interval [0, +inf). */ std::size_t min_points; /*! Maximum acceptable Euclidean distance between a point and a shape. %Default value is 1% of the bounding box diagonal. It must belong to the interval [0, +inf). */ FT epsilon; /*! Maximum threshold on the dot product between the estimated shape's normal and the point's normal, that is the cosine of the angle (cos(25°) = 0.9). %Default value is 0.9 (around 25 degrees). It must belong to the interval [0, 1]. */ FT normal_threshold; /*! Maximum acceptable Euclidean distance between points, which are assumed to be neighbors. %Default value is 1% of the bounding box diagonal. It must belong to the interval [0, +inf). */ FT cluster_epsilon; }; /// @} private: typedef internal::RANSAC_octree<Traits> Direct_octree; typedef internal::RANSAC_octree<Traits> Indexed_octree; //--------------------------------------------typedef // Creates a function pointer for instancing shape instances. template<class ShapeT> static Shape *factory() { return new ShapeT; } public: /// \name Initialization /// @{ /*! Constructs an empty shape detection object. */ Efficient_RANSAC(Traits t = Traits()) : m_traits(t), m_direct_octrees(nullptr), m_global_octree(nullptr), m_num_subsets(0), m_num_available_points(0), m_num_total_points(0), m_valid_iterators(false) { } /*! Releases all memory allocated by this instance including shapes. */ ~Efficient_RANSAC() { clear(); } /*! Retrieves the traits class. */ const Traits & traits() const { return m_traits; } /*! Retrieves the point property map. */ const Point_map &point_map() const { return m_point_pmap; } /*! Retrieves the normal property map. 
*/ const Normal_map &normal() const { return m_normal_pmap; } Input_iterator input_iterator_first() const { return m_input_iterator_first; } Input_iterator input_iterator_beyond() const { return m_input_iterator_beyond; } /*! Sets the input data. The range must stay valid until the detection has been performed and the access to the results is no longer required. The data in the input is reordered by the methods `detect()` and `preprocess()`. This function first calls `clear()`. */ void set_input( Input_range &input_range, ///< Range of input data. Point_map point_map = Point_map(), ///< Property map to access the position of an input point. Normal_map normal_map = Normal_map() ///< Property map to access the normal of an input point. ) { m_point_pmap = point_map; m_normal_pmap = normal_map; m_input_iterator_first = input_range.begin(); m_input_iterator_beyond = input_range.end(); clear(); m_extracted_shapes = boost::make_shared<std::vector<boost::shared_ptr<Shape> > >(); m_num_available_points = m_num_total_points = std::distance( m_input_iterator_first, m_input_iterator_beyond); m_valid_iterators = true; } /*! Registers the shape type `ShapeType` in the detection engine that must inherit from `Shape_base`. For example, for registering a plane as detectable shape, you should call `ransac.add_shape_factory< Shape_detection::Plane<Traits> >();`. Note that if your call is within a template, you should add the `template` keyword just before `add_shape_factory`: `ransac.template add_shape_factory< Shape_detection::Plane<Traits> >();`. */ template<class Shape_type> void add_shape_factory() { m_shape_factories.push_back(factory<Shape_type>); } /*! Constructs internal data structures required for the shape detection. These structures only depend on the input data, i.e. the points and normal vectors. This method is called by `detect()`, if it was not called before by the user. 
*/ bool preprocess() { if (m_num_total_points == 0) return false; // Generation of subsets m_num_subsets = (std::size_t) (std::max<std::ptrdiff_t>)((std::ptrdiff_t) std::floor(std::log(double(m_num_total_points)) / std::log(2.)) - 9, 2); // SUBSET GENERATION -> // approach with increasing subset sizes -> replace with octree later on Input_iterator last = m_input_iterator_beyond - 1; std::size_t remainingPoints = m_num_total_points; m_available_octree_sizes.resize(m_num_subsets); m_direct_octrees = new Direct_octree *[m_num_subsets]; for (int s = int(m_num_subsets) - 1; s >= 0; --s) { std::size_t subsetSize = remainingPoints; std::vector<std::size_t> indices(subsetSize); if (s) { subsetSize >>= 1; for (std::size_t i = 0; i < subsetSize; i++) { std::size_t index = get_default_random()(2); index = index + (i << 1); index = (index >= remainingPoints) ? remainingPoints - 1 : index; indices[i] = index; } // move points to the end of the point vector std::size_t j = subsetSize; do { j--; typename std::iterator_traits<Input_iterator>::value_type tmp = (*last); *last = m_input_iterator_first[indices[std::size_t(j)]]; m_input_iterator_first[indices[std::size_t(j)]] = tmp; last--; } while (j > 0); m_direct_octrees[s] = new Direct_octree( m_traits, last + 1, last + subsetSize + 1, m_point_pmap, remainingPoints - subsetSize); } else m_direct_octrees[0] = new Direct_octree( m_traits, m_input_iterator_first, m_input_iterator_first + (subsetSize), m_point_pmap, 0); m_available_octree_sizes[s] = subsetSize; m_direct_octrees[s]->refine(m_options.cluster_epsilon); remainingPoints -= subsetSize; } m_global_octree = new Indexed_octree( m_traits, m_input_iterator_first, m_input_iterator_beyond, m_point_pmap ); m_global_octree->refine(m_options.cluster_epsilon); return true; } /// @} /// \name Memory Management /// @{ /*! Removes all shape types registered for detection. */ void clear_shape_factories() { m_shape_factories.clear(); } /*! 
Frees memory allocated for the internal search structures but keeps the detected shapes. It invalidates the range retrieved using `unassigned_points()`. */ void clear_octrees() { // If there is no data yet, there are no data structures. if (!m_valid_iterators) return; if (m_global_octree) { delete m_global_octree; m_global_octree = nullptr; } if (m_direct_octrees) { for (std::size_t i = 0; i < m_num_subsets; i++) delete m_direct_octrees[i]; delete[] m_direct_octrees; m_direct_octrees = nullptr; } m_num_subsets = 0; } /*! Calls `clear_octrees()` and removes all detected shapes. All internal structures are cleaned, including formerly detected shapes. Thus iterators and ranges retrieved through `shapes()`, `planes()` and `indices_of_unassigned_points()` are invalidated. */ void clear() { // If there is no data yet, there are no data structures. if (!m_valid_iterators) return; std::vector<int>().swap(m_shape_index); m_extracted_shapes = boost::make_shared<std::vector<boost::shared_ptr<Shape> > >(); m_num_available_points = m_num_total_points; clear_octrees(); clear_shape_factories(); } /// @} /// \name Detection /// @{ /*! Performs the shape detection. Shape types considered during the detection are those registered using `add_shape_factory()`. \param options parameters for shape detection \param callback can be omitted if the algorithm should be run without any callback. It is called regularly when the algorithm is running: the current advancement (between 0.0 and 1.0) is passed as parameter. If it returns `true`, then the algorithm continues its execution normally; if it returns `false`, the algorithm is stopped. Note that this interruption may leave the class in an invalid state. \return `true` if shape types have been registered and input data has been set. Otherwise, `false` is returned. 
*/ bool detect(const Parameters &options = Parameters(), const std::function<bool(double)> &callback = std::function<bool(double)>()) { m_options = options; // No shape types for detection or no points provided, exit if (m_shape_factories.size() == 0 || (m_input_iterator_beyond - m_input_iterator_first) == 0) return false; if (m_num_subsets == 0 || m_global_octree == 0) { if (!preprocess()) return false; } if (callback && !callback(0.)) return false; // Reset data structures possibly used by former search m_extracted_shapes = boost::make_shared<std::vector<boost::shared_ptr<Shape> > >(); m_num_available_points = m_num_total_points; for (std::size_t i = 0; i < m_num_subsets; i++) { m_available_octree_sizes[i] = m_direct_octrees[i]->size(); } // Use bounding box diagonal as reference for default values Bbox_3 bbox = m_global_octree->boundingBox(); FT bbox_diagonal = (FT) CGAL::sqrt( (bbox.xmax() - bbox.xmin()) * (bbox.xmax() - bbox.xmin()) + (bbox.ymax() - bbox.ymin()) * (bbox.ymax() - bbox.ymin()) + (bbox.zmax() - bbox.zmin()) * (bbox.zmax() - bbox.zmin())); // Epsilon or cluster_epsilon have been set by the user? // If not, derive from bounding box diagonal m_options.epsilon = (m_options.epsilon < 0) ? bbox_diagonal * (FT) 0.01 : m_options.epsilon; m_options.cluster_epsilon = (m_options.cluster_epsilon < 0) ? bbox_diagonal * (FT) 0.01 : m_options.cluster_epsilon; // Minimum number of points has been set? m_options.min_points = (m_options.min_points == (std::numeric_limits<std::size_t>::max)()) ? (std::size_t)((FT)0.01 * m_num_available_points) : m_options.min_points; m_options.min_points = (m_options.min_points < 10) ? 
10 : m_options.min_points; // Initializing the shape index m_shape_index.assign(m_num_available_points, -1); if (m_options.min_points > m_num_available_points) return true; // List of all randomly drawn candidates // with the minimum number of points std::vector<Shape *> candidates; // Identifying minimum number of samples m_required_samples = 0; for (std::size_t i = 0; i < m_shape_factories.size(); i++) { Shape *tmp = (Shape *) m_shape_factories[i](); m_required_samples = (std::max<std::size_t>)(m_required_samples, tmp->minimum_sample_size()); delete tmp; } std::size_t first_sample; // first sample for RANSAC FT best_expected = 0; // number of points that have been assigned to a shape std::size_t num_invalid = 0; std::size_t generated_candidates = 0; std::size_t failed_candidates = 0; std::size_t limit_failed_candidates = (std::max)(std::size_t(10000), std::size_t(m_input_iterator_beyond - m_input_iterator_first) / std::size_t(100)); bool force_exit = false; bool keep_searching = true; do { // main loop best_expected = 0; if (keep_searching) do { // Search (remaining_points / min_points) shapes (max 200 per iteration, min 1) std::size_t search_number = (std::min)(std::size_t(200), (std::max)(std::size_t((m_num_available_points - num_invalid) / double(m_options.min_points)), std::size_t(1))); for (std::size_t nb = 0; nb < search_number; ++ nb) { // Generate candidates //1. 
pick a point p1 randomly among available points std::set<std::size_t> indices; bool done = false; do { do first_sample = get_default_random()( static_cast<unsigned int>(m_num_available_points)); while (m_shape_index[first_sample] != -1); done = drawSamplesFromCellContainingPoint (m_global_octree, get(m_point_pmap, *(m_input_iterator_first + first_sample)), select_random_octree_level(), indices, m_shape_index, m_required_samples); if (callback && !callback(num_invalid / double(m_num_total_points))) return false; } while (m_shape_index[first_sample] != -1 || !done); generated_candidates++; //add candidate for each type of primitives bool candidate_success = false; for(typename std::vector<Shape *(*)()>::iterator it = m_shape_factories.begin(); it != m_shape_factories.end(); it++) { if (callback && !callback(num_invalid / double(m_num_total_points))) return false; Shape *p = (Shape *) (*it)(); //compute the primitive and says if the candidate is valid p->compute(indices, m_input_iterator_first, m_traits, m_point_pmap, m_normal_pmap, m_options.epsilon, m_options.normal_threshold); if (p->is_valid()) { improve_bound(p, m_num_available_points - num_invalid, 1, 500); //evaluate the candidate if(p->max_bound() >= m_options.min_points && p->score() > 0) { if (best_expected < p->expected_value()) best_expected = p->expected_value(); candidates.push_back(p); candidate_success = true; } else { delete p; } } else { delete p; } } if (!candidate_success) ++ failed_candidates; } if (failed_candidates >= limit_failed_candidates) { force_exit = true; } keep_searching = (stop_probability(m_options.min_points, m_num_available_points - num_invalid, generated_candidates, m_global_octree->maxLevel()) > m_options.probability); } while (!force_exit && stop_probability((std::size_t) best_expected, m_num_available_points - num_invalid, generated_candidates, m_global_octree->maxLevel()) > m_options.probability && keep_searching); // end of generate candidate if (force_exit) { break; } if 
(candidates.empty()) continue; // Now get the best candidate in the current set of all candidates // Note that the function sorts the candidates: // the best candidate is always the last element of the vector Shape *best_candidate = get_best_candidate(candidates, m_num_available_points - num_invalid); if (callback && !callback(num_invalid / double(m_num_total_points))) return false; // If search is done and the best candidate is too small, we are done. if (!keep_searching && best_candidate->m_score < m_options.min_points) break; if (!best_candidate) continue; best_candidate->m_indices.clear(); best_candidate->m_score = score(m_global_octree, best_candidate, m_shape_index, FT(3) * m_options.epsilon, m_options.normal_threshold); best_expected = static_cast<FT>(best_candidate->m_score); best_candidate->connected_component(best_candidate->m_indices, m_options.cluster_epsilon); if (callback && !callback(num_invalid / double(m_num_total_points))) return false; // check score against min_points and clear out candidates if too low if (best_candidate->indices_of_assigned_points().size() < m_options.min_points) { if (!(best_candidate->indices_of_assigned_points().empty())) for (std::size_t i = 0; i < candidates.size() - 1; i++) { if (best_candidate->is_same(candidates[i])) { delete candidates[i]; candidates[i] = nullptr; } } candidates.back() = nullptr; delete best_candidate; best_candidate = nullptr; if (callback && !callback(num_invalid / double(m_num_total_points))) return false; // Trimming candidates list std::size_t empty = 0, occupied = 0; while (empty < candidates.size()) { while (empty < candidates.size() && candidates[empty]) empty++; if (empty >= candidates.size()) break; if (occupied < empty) occupied = empty + 1; while (occupied < candidates.size() && !candidates[occupied]) occupied++; if (occupied >= candidates.size()) break; candidates[empty] = candidates[occupied]; candidates[occupied] = nullptr; empty++; occupied++; } candidates.resize(empty); if (callback 
&& !callback(num_invalid / double(m_num_total_points))) return false; } else if (stop_probability((std::size_t) best_candidate->expected_value(), (m_num_available_points - num_invalid), generated_candidates, m_global_octree->maxLevel()) <= m_options.probability) { // Remove candidate from list candidates.back() = nullptr; //1. add best candidate to final result. m_extracted_shapes->push_back( boost::shared_ptr<Shape>(best_candidate)); if (callback && !callback(num_invalid / double(m_num_total_points))) return false; //2. remove the points const std::vector<std::size_t> &indices_points_best_candidate = best_candidate->indices_of_assigned_points(); // update generated candidates to reflect removal of points generated_candidates = std::size_t(std::pow(1.f - (indices_points_best_candidate.size() / float(m_num_available_points - num_invalid)), 3.f) * generated_candidates); //2.3 Remove the points from the subtrees for (std::size_t i = 0; i < indices_points_best_candidate.size(); i++) { m_shape_index[indices_points_best_candidate.at(i)] = int(m_extracted_shapes->size()) - 1; num_invalid++; for (std::size_t j = 0; j < m_num_subsets; j++) { if (m_direct_octrees[j]) { std::size_t offset = m_direct_octrees[j]->offset(); if (offset <= indices_points_best_candidate.at(i) && (indices_points_best_candidate.at(i) - offset) < m_direct_octrees[j]->size()) { m_available_octree_sizes[j]--; } } } } failed_candidates = 0; best_expected = 0; if (callback && !callback(num_invalid / double(m_num_total_points))) return false; std::vector<std::size_t> subset_sizes(m_num_subsets); subset_sizes[0] = m_available_octree_sizes[0]; for (std::size_t i = 1; i < m_num_subsets; i++) { subset_sizes[i] = subset_sizes[i - 1] + m_available_octree_sizes[i]; } //3. 
Remove points from candidates common with extracted primitive //#pragma omp parallel for best_expected = 0; for (std::size_t i = 0; i < candidates.size() - 1; i++) { if (candidates[i]) { candidates[i]->update_points(m_shape_index); candidates[i]->compute_bound( subset_sizes[candidates[i]->m_nb_subset_used - 1], m_num_available_points - num_invalid); if (candidates[i]->max_bound() < m_options.min_points) { delete candidates[i]; candidates[i] = nullptr; } else { best_expected = (candidates[i]->expected_value() > best_expected) ? candidates[i]->expected_value() : best_expected; } } } if (callback && !callback(num_invalid / double(m_num_total_points))) return false; std::size_t start = 0, end = candidates.size() - 1; while (start < end) { while (candidates[start] && start < end) start++; while (!candidates[end] && start < end) end--; if (!candidates[start] && candidates[end] && start < end) { candidates[start] = candidates[end]; candidates[end] = nullptr; start++; end--; } } if (candidates[end]) end++; candidates.resize(end); } else if (!keep_searching) ++generated_candidates; if (callback && !callback(num_invalid / double(m_num_total_points))) return false; keep_searching = (stop_probability(m_options.min_points, m_num_available_points - num_invalid, generated_candidates, m_global_octree->maxLevel()) > m_options.probability); } while ((keep_searching && FT(m_num_available_points - num_invalid) >= m_options.min_points) || best_expected >= m_options.min_points); // Clean up remaining candidates. for (std::size_t i = 0; i < candidates.size(); i++) delete candidates[i]; candidates.resize(0); m_num_available_points -= num_invalid; return true; } /// @} /// \name Access /// @{ /*! Returns an `Iterator_range` with a bidirectional iterator with value type `boost::shared_ptr<Shape>` over the detected shapes in the order of detection. Depending on the chosen probability for the detection, the shapes are ordered with decreasing size. 
*/ Shape_range shapes() const { return Shape_range(m_extracted_shapes); } /*! Returns an `Iterator_range` with a bidirectional iterator with value type `boost::shared_ptr<Plane_shape>` over only the detected planes in the order of detection. Depending on the chosen probability for the detection, the planes are ordered with decreasing size. */ Plane_range planes() const { boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > > planes = boost::make_shared<std::vector<boost::shared_ptr<Plane_shape> > >(); for (std::size_t i = 0; i < m_extracted_shapes->size(); ++i) { boost::shared_ptr<Plane_shape> pshape = boost::dynamic_pointer_cast<Plane_shape>((*m_extracted_shapes)[i]); // Ignore all shapes other than plane if (pshape != boost::shared_ptr<Plane_shape>()) planes->push_back(pshape); } return Plane_range(planes); } /*! Number of points not assigned to a shape. */ std::size_t number_of_unassigned_points() const { return m_num_available_points; } /*! Returns an `Iterator_range` with a bidirectional iterator with value type `std::size_t` as indices into the input data that has not been assigned to a shape. 
*/
  Point_index_range indices_of_unassigned_points()
  {
    // Filter the full index range [0, m_shape_index.size()) down to the
    // indices whose shape-index entry is still -1 (unassigned).
    Filter_unassigned_points fup(m_shape_index);

    Point_index_iterator p1 =
      boost::make_filter_iterator<Filter_unassigned_points>(
        fup,
        boost::counting_iterator<std::size_t,
          boost::use_default, std::ptrdiff_t>(0),
        boost::counting_iterator<std::size_t,
          boost::use_default, std::ptrdiff_t>(m_shape_index.size()));

    return make_range(p1, Point_index_iterator(p1.end()));
  }
  /// @}

private:

  // Picks a uniformly random octree level in [0, maxLevel()].
  int select_random_octree_level()
  {
    auto upper_bound =
      static_cast<unsigned int>(m_global_octree->maxLevel() + 1);
    return (int) get_default_random()(upper_bound);
  }

  // Sorts the candidates by their score upper bound and refines the bounds
  // of candidates whose score intervals overlap the best one's, until the
  // best candidate is separated. The vector is sorted as a side effect;
  // the best candidate is always the LAST element.
  Shape *get_best_candidate(std::vector<Shape *> &candidates,
                            const std::size_t num_available_points)
  {
    if (candidates.size() == 1)
      return candidates.back();

    int index_worse_candidate = 0;
    bool improved = true;

    while (index_worse_candidate < (int) candidates.size() - 1 && improved)
    {
      improved = false;
      typename Shape::Compare_by_max_bound comp;

      std::sort(candidates.begin() + index_worse_candidate,
                candidates.end(), comp);

      // refine the best one
      improve_bound(candidates.back(), num_available_points,
                    m_num_subsets, m_options.min_points);

      int position_stop;

      // Take all those intersecting the best one, check for equal ones
      for (position_stop = int(candidates.size()) - 1;
           position_stop > index_worse_candidate;
           position_stop--)
      {
        if (candidates.back()->min_bound() >
            candidates.at(position_stop)->max_bound())
          break; // the intervals do not overlap anymore

        if (candidates.at(position_stop)->max_bound()
            <= m_options.min_points)
          break; // the following candidate doesn't have enough points!

        // If we reach this point, there is an overlap
        // between best one and position_stop,
        // so request refining bound on position_stop
        improved |= improve_bound(candidates.at(position_stop),
                                  num_available_points,
                                  m_num_subsets,
                                  m_options.min_points);

        // test again after refined
        if (candidates.back()->min_bound() >
            candidates.at(position_stop)->max_bound())
          break; // the intervals do not overlap anymore
      }

      index_worse_candidate = position_stop;
    }

    return candidates.back();
  }

  // Evaluates the candidate against additional point subsets and updates
  // its score interval via compute_bound(). Returns false when the
  // candidate has already used max_subset (or all) subsets.
  bool improve_bound(Shape *candidate,
                     std::size_t num_available_points,
                     std::size_t max_subset,
                     std::size_t min_points)
  {
    if (candidate->m_nb_subset_used >= max_subset)
      return false;

    if (candidate->m_nb_subset_used >= m_num_subsets)
      return false;

    // Clamp to a valid subset index (defensive; the check above already
    // rules the overflow case out).
    candidate->m_nb_subset_used =
      (candidate->m_nb_subset_used >= m_num_subsets) ?
        m_num_subsets - 1 : candidate->m_nb_subset_used;

    // what it does is add another subset and recompute lower and upper bound
    // the next subset to include is provided by m_nb_subset_used
    std::size_t num_points_evaluated = 0;
    for (std::size_t i = 0; i < candidate->m_nb_subset_used; i++)
      num_points_evaluated += m_available_octree_sizes[i];

    // need score of new subset as well as sum of
    // the score of the previous considered subset
    std::size_t new_score = 0;
    std::size_t new_sampled_points = 0;

    do
    {
      new_score = score(m_direct_octrees[candidate->m_nb_subset_used],
                        candidate,
                        m_shape_index,
                        m_options.epsilon,
                        m_options.normal_threshold);
      candidate->m_score += new_score;

      num_points_evaluated +=
        m_available_octree_sizes[candidate->m_nb_subset_used];
      new_sampled_points +=
        m_available_octree_sizes[candidate->m_nb_subset_used];

      candidate->m_nb_subset_used++;
    } while (new_sampled_points < min_points &&
             candidate->m_nb_subset_used < m_num_subsets);

    // NOTE(review): m_score is immediately overwritten with the size of
    // m_indices, so the `m_score += new_score` above looks redundant —
    // presumably score() accumulates into m_indices; confirm against
    // Shape::cost_function before changing.
    candidate->m_score = candidate->m_indices.size();

    candidate->compute_bound(num_points_evaluated, num_available_points);

    return true;
  }

  // Probability of having missed a shape of size `largest_candidate`
  // after drawing `num_candidates` samples; compared against
  // m_options.probability to decide when to stop searching.
  inline FT stop_probability(std::size_t largest_candidate,
                             std::size_t num_pts,
                             std::size_t num_candidates,
                             std::size_t octree_depth) const
  {
    return (std::min<FT>)((FT) std::pow(FT(1) - FT(largest_candidate)
      / (FT(num_pts) * FT(octree_depth + 1)
        * FT(1 << (m_required_samples - 1))),
      int(num_candidates)), FT(1));
  }

  // Scores `candidate` against the not-yet-assigned points
  // (shapeIndex == -1) of `octree`, visiting only the cells whose
  // epsilon-inflated bounding diagonal can intersect the shape.
  // Returns the accumulated number of assigned indices.
  template<class Octree>
  std::size_t score(const Octree *octree,
                    Shape *candidate,
                    std::vector<int> &shapeIndex,
                    FT epsilon,
                    FT normal_threshold)
  {
    typedef typename Octree::Node Cell;

    std::stack<Cell> stack;
    stack.push(octree->root());

    while (!stack.empty())
    {
      Cell cell = stack.top();
      stack.pop();

      // Cell width at this depth; the cell diagonal plus epsilon is a
      // conservative intersection radius for the pruning test below.
      FT width = octree->width() / (1 << (cell.depth()));

      FT diag = CGAL::sqrt(FT(3) * width * width) + epsilon;

      FT dist = candidate->squared_distance(octree->barycenter(cell));
      if (dist > (diag * diag))
        continue;

      // differ between full or partial overlap?
      // if full overlap further traversal of this branch is not necessary

      if (cell.is_leaf())
      {
        std::vector<std::size_t> indices;
        indices.reserve(cell.size());
        for (std::size_t i = 0; i < cell.size(); i++)
        {
          if (shapeIndex[octree->index(cell, i)] == -1)
          {
            indices.push_back(octree->index(cell, i));
          }
        }
        candidate->cost_function(epsilon, normal_threshold, indices);
      }
      else
      {
        // Interior cell: descend into the non-empty children.
        if (!cell.is_leaf())
        {
          for (std::size_t i = 0; i < 8; i++)
          {
            if (!cell[i].empty())
              stack.push(cell[i]);
          }
        }
      }
    }

    return candidate->m_indices.size();
  }

  // Descends from the root towards the node at `level` containing point p.
  // Returns a null node when the tree ends before `level` is reached.
  template<class Octree>
  const typename Octree::Node node_containing_point(const Octree *octree,
                                                    const Point &p,
                                                    std::size_t level)
  {
    // Find the node containing the point
    typename Octree::Node cur = octree->root();

    while (!cur.is_null() && cur.depth() < level)
    {
      // If cur is a leaf node, its child is null
      if (cur.is_leaf())
        return typename Octree::Node();

      // If that child is empty, return null
      if (cur.empty())
        return typename Octree::Node();

      // Determine the coordinate of the child: one bit per axis,
      // set when p lies on the upper side of the cell barycenter.
      Point center = octree->barycenter(cur);
      std::bitset<3> coordinate;
      coordinate[0] = center.x() <= p.x();
      coordinate[1] = center.y() <= p.y();
      coordinate[2] = center.z() <= p.z();

      // Otherwise, descend into the correct child of cur
      cur = cur[coordinate.to_ulong()];
    }

    return cur;
  }

  // Draws `requiredSamples` distinct unassigned point indices from the
  // octree cell at `level` containing p. Returns false when that cell does
  // not exist or holds fewer than requiredSamples unassigned points.
  template<class Octree>
  bool drawSamplesFromCellContainingPoint(const Octree *octree,
                                          const Point &p,
                                          std::size_t level,
                                          std::set<std::size_t> &indices,
                                          const std::vector<int> &shapeIndex,
                                          std::size_t requiredSamples)
  {
    typedef typename Octree::Node Cell;

    const Cell cur = node_containing_point(octree, p, level);

    // Stop if the node we need doesn't exist
    if (cur.is_null())
      return false;

    // Count point indices that map to -1 in the shape index
    std::size_t enough = 0;
    for (auto j : cur)
    {
      if (shapeIndex[j] == -1)
        enough++;
      if (enough >= requiredSamples)
        break;
    }

    // Make sure we found enough samples
    if (enough < requiredSamples)
      return false;

    do
    {
      // NOTE(review): this local `p` shadows the Point parameter `p`;
      // consider renaming.
      std::size_t p = CGAL::get_default_random().
        uniform_int<std::size_t>(0, cur.size() - 1);
      std::size_t j = octree->index(cur, p);

      if (shapeIndex[j] == -1)
        indices.insert(j);
    } while (indices.size() < requiredSamples);

    return true;
  }

private:

  // User-supplied / derived detection parameters.
  Parameters m_options;

  // Traits class.
  Traits m_traits;

  // Octrees build on input data for quick shape evaluation and
  // sample selection within an octree cell.
  Direct_octree **m_direct_octrees;
  Indexed_octree *m_global_octree;
  std::vector<std::size_t> m_available_octree_sizes;
  std::size_t m_num_subsets;

  // maps index into points to assigned extracted primitive
  std::vector<int> m_shape_index;
  std::size_t m_num_available_points;
  std::size_t m_num_total_points;
  std::size_t m_required_samples;

  // give the index of the subset of point i
  std::vector<int> m_index_subsets;

  boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > >
    m_extracted_shapes;

  // Factory callbacks, one per registered shape type.
  std::vector<Shape *(*)()> m_shape_factories;

  // iterators of input data
  bool m_valid_iterators;
  Input_iterator m_input_iterator_first, m_input_iterator_beyond;
  Point_map m_point_pmap;
  Normal_map m_normal_pmap;
};
}
}

#endif // CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H
2symbol_new.h
//// //// Created by nikita on 30.09.2020. //// // //#ifndef CPU_2SYMBOL_NEW_H //#define CPU_2SYMBOL_NEW_H // // // // //#include <vector> //#include <cmath> //#include <bitset> // // //template<class Input> //inline void process_cubes_antidiag_bin(int lower_bound, int upper_bound, int left_edge, int top_edge, // Input braid_ones, // Input *bitset_left_strand_map, // Input *bitset_top_strand_map, // Input *a_reverse, Input *b) { // // for (int j = lower_bound; j < upper_bound; ++j) { // Input left_cap, symbols, combing_condition, rev_combing_cond, top_strand_shifted; // // Input left_strand = bitset_left_strand_map[left_edge + j]; // Input top_strand = bitset_top_strand_map[top_edge + j]; // Input symbol_a = a_reverse[left_edge + j]; // Input symbol_b = b[top_edge + j]; // // int rev_counter = (sizeof(Input) * 8 - 2); // Input mask = Input(1); //// Input mask_r = Input(1) << rev_counter; // // // // upper half //#pragma GCC unroll 128 // for (int inside_diag_num = 0; inside_diag_num < sizeof(Input) * 8 / 2 - 1; ++inside_diag_num) { // left_cap = left_strand >> rev_counter; // symbols = ~(((symbol_a >> rev_counter)) ^ symbol_b); // symbols &= (symbols >> 1) & braid_ones; // combing_condition = mask & (symbols | (((~(left_cap)) & top_strand))); // rev_combing_cond = combing_condition ^ braid_ones; // // if (combing_condition) { // top_strand_shifted = top_strand << rev_counter; // top_strand = (rev_combing_cond & top_strand) | (combing_condition & left_cap); // // combing_condition <<= rev_counter; // rev_combing_cond = combing_condition ^ braid_ones; // // left_strand = (rev_combing_cond & left_strand) | (combing_condition & top_strand_shifted); // } // // // rev_counter -= 2; // mask = (mask << 2) | Input(1); //// mask_r = mask_r | (mask_r >> 2); // } // // // center // symbols = (~(symbol_a ^ symbol_b)); // symbols &= (symbols >> 1) & braid_ones; // combing_condition = (symbols | ((~left_strand) & top_strand)); // rev_combing_cond = combing_condition ^ braid_ones; 
// if (combing_condition) { // top_strand_shifted = top_strand; // top_strand = (rev_combing_cond & top_strand) | (combing_condition & left_strand); // left_strand = (rev_combing_cond & left_strand) | (combing_condition & top_strand_shifted); // } // // // mask = braid_ones; //// mask_r = braid_ones; // // //lower half //#pragma GCC unroll 128 // for (int inside_diag_num = 0; inside_diag_num < sizeof(Input) * 8 / 2 - 1; ++inside_diag_num) { // mask <<= 2; //// mask_r >>= 2; // // left_cap = left_strand << (2 * (inside_diag_num + 1)); // symbols = ~(((symbol_a << (2 * (inside_diag_num + 1)))) ^ symbol_b); // symbols &= (symbols >> 1) & braid_ones; // // combing_condition = mask & (symbols | (((~(left_cap)) & top_strand))); // rev_combing_cond = combing_condition ^ braid_ones; // // if (combing_condition) { // top_strand_shifted = top_strand >> (2 * (inside_diag_num + 1)); // top_strand = (rev_combing_cond & top_strand) | (combing_condition & left_cap); //// symbols = ~(((symbol_a)) ^ (symbol_b >> (2 * (inside_diag_num + 1)))); //// symbols &= (symbols >> 1) & braid_ones; // //// combing_condition = mask_r & (symbols | ((~(left_strand) & top_strand_shifted))); // combing_condition >>= (2 * (inside_diag_num + 1)); // rev_combing_cond = combing_condition ^ braid_ones; // // // left_strand = (rev_combing_cond & left_strand) | (combing_condition & top_strand_shifted); // } // } // // // bitset_left_strand_map[left_edge + j] = left_strand; // // bitset_top_strand_map[top_edge + j] = top_strand; // } //} // //template<class Input> //inline void process_cubes_antidiag_mpi_bin(int lower_bound, int upper_bound, int left_edge, int top_edge, // Input *bitset_left_strand_map, // Input *bitset_top_strand_map, // Input *a_reverse, Input *b) { // // const int upper = sizeof(Input) * 8 - 1; // //#pragma omp for simd schedule(static) aligned(bitset_top_strand_map, bitset_left_strand_map, a_reverse, b:sizeof(Input)*8) // for (int j = lower_bound; j < upper_bound; ++j) { // Input 
left_cap, symbols, combing_condition, rev_combing_cond, top_strand_shifted; // Input left_strand = bitset_left_strand_map[left_edge + j]; // Input top_strand = bitset_top_strand_map[top_edge + j]; // Input symbol_a = a_reverse[left_edge + j]; // Input symbol_b = b[top_edge + j]; // // Input mask = Input(1); // // // // upper half //#pragma GCC unroll 256 // for (int rev_counter = (sizeof(Input) * 8 - 1); rev_counter > 0; rev_counter--) { // left_cap = left_strand >> rev_counter; // symbols = ~(((symbol_a >> rev_counter)) ^ symbol_b); // combing_condition = mask & (symbols | (((~(left_cap)) & top_strand))); // rev_combing_cond = ~combing_condition; // // top_strand_shifted = top_strand << rev_counter; // top_strand = (rev_combing_cond & top_strand) | (combing_condition & left_cap); // // combing_condition <<= rev_counter; // rev_combing_cond = ~combing_condition; // // left_strand = (rev_combing_cond & left_strand) | (combing_condition & top_strand_shifted); // // mask = (mask << 1) | Input(1); // } // // // center // symbols = (~(symbol_a ^ symbol_b)); //// symbols &= (symbols >> 1) & braid_ones; // combing_condition = (symbols | ((~left_strand) & top_strand)); // rev_combing_cond = ~combing_condition; // top_strand_shifted = top_strand; // top_strand = (rev_combing_cond & top_strand) | (combing_condition & left_strand); // left_strand = (rev_combing_cond & left_strand) | (combing_condition & top_strand_shifted); // // mask = ~Input(0); // // //lower half //#pragma GCC unroll 256 // for (int inside_diag_num = 1; inside_diag_num < upper + 1; inside_diag_num++) { // mask <<= 1; // // left_cap = left_strand << (inside_diag_num); // symbols = ~(((symbol_a << inside_diag_num)) ^ symbol_b); //// symbols &= (symbols >> 1) & braid_ones; // // combing_condition = mask & (symbols | (((~(left_cap)) & top_strand))); // rev_combing_cond = ~combing_condition; // // top_strand_shifted = top_strand >> ((inside_diag_num)); // top_strand = (rev_combing_cond & top_strand) | 
(combing_condition & left_cap); // combing_condition >>= ((inside_diag_num)); // rev_combing_cond = ~combing_condition; // // // left_strand = (rev_combing_cond & left_strand) | (combing_condition & top_strand_shifted); // } // // // bitset_left_strand_map[left_edge + j] = left_strand; // bitset_top_strand_map[top_edge + j] = top_strand; // } //} // // //template<class Input> //inline void process_cube_with_exception_bin(int left_edge, int top_edge, int j, Input braid_ones, Input l_active_mask, // Input r_active_mask, // Input *bitset_left_strand_map, Input *bitset_top_strand_map, // Input *a_reverse, // Input *b) { // // Input left_cap, symbols, combing_condition, rev_combing_cond, top_strand_shifted; // // Input left_strand = bitset_left_strand_map[left_edge + j]; // Input top_strand = bitset_top_strand_map[top_edge + j]; // Input symbol_a = a_reverse[left_edge + j]; // Input symbol_b = b[top_edge + j]; // // int rev_counter = (sizeof(Input) * 8 - 2); // Input mask = Input(1); // Input mask_r = Input(1) << rev_counter; // // // // upper half // for (int inside_diag_num = 0; inside_diag_num < sizeof(Input) * 8 / 2; ++inside_diag_num) { // left_cap = left_strand >> rev_counter; // symbols = ~(((symbol_a >> rev_counter)) ^ symbol_b); // symbols &= (symbols >> 1) & braid_ones; // combing_condition = // r_active_mask & (l_active_mask >> rev_counter) & mask & (symbols | (((~(left_cap)) & top_strand))); // rev_combing_cond = combing_condition ^ braid_ones; // // if (combing_condition) { // top_strand_shifted = top_strand << rev_counter; // top_strand = (rev_combing_cond & top_strand) | (combing_condition & left_cap); // // symbols = ~(((symbol_a)) ^ (symbol_b << rev_counter)); // symbols &= (symbols >> 1) & braid_ones; // combing_condition = (r_active_mask << rev_counter) & l_active_mask & mask_r & // (symbols | ((~(left_strand) & top_strand_shifted))); // rev_combing_cond = combing_condition ^ braid_ones; // // left_strand = (rev_combing_cond & left_strand) | 
(combing_condition & top_strand_shifted); // } // // // rev_counter -= 2; // mask = (mask << 2) | Input(1); // mask_r = mask_r | (mask_r >> 2); // } // // // center // symbols = (~(symbol_a ^ symbol_b)); // symbols &= (symbols >> 1) & braid_ones; // combing_condition = l_active_mask & r_active_mask & (symbols | ((~left_strand) & top_strand)); // rev_combing_cond = combing_condition ^ braid_ones; // if (combing_condition) { // top_strand_shifted = top_strand; // top_strand = (rev_combing_cond & top_strand) | (combing_condition & left_strand); // left_strand = (rev_combing_cond & left_strand) | (combing_condition & top_strand_shifted); // } // // // mask = braid_ones; // mask_r = braid_ones; // // //lower half // for (int inside_diag_num = 0; inside_diag_num < sizeof(Input) * 8 / 2; ++inside_diag_num) { // mask <<= 2; // mask_r >>= 2; // // left_cap = left_strand << (2 * (inside_diag_num + 1)); // symbols = ~(((symbol_a << (2 * (inside_diag_num + 1)))) ^ symbol_b); // symbols &= (symbols >> 1) & braid_ones; // // combing_condition = r_active_mask & (l_active_mask << (2 * (inside_diag_num + 1))) & mask & // (symbols | (((~(left_cap)) & top_strand))); // rev_combing_cond = combing_condition ^ braid_ones; // // if (combing_condition) { // top_strand_shifted = top_strand >> (2 * (inside_diag_num + 1)); // top_strand = (rev_combing_cond & top_strand) | (combing_condition & left_cap); // symbols = ~(((symbol_a)) ^ (symbol_b >> (2 * (inside_diag_num + 1)))); // symbols &= (symbols >> 1) & braid_ones; // // combing_condition = (r_active_mask >> (2 * (inside_diag_num + 1))) & l_active_mask & mask_r & // (symbols | ((~(left_strand) & top_strand_shifted))); // rev_combing_cond = combing_condition ^ braid_ones; // // left_strand = (rev_combing_cond & left_strand) | (combing_condition & top_strand_shifted); // } // } // // // bitset_left_strand_map[left_edge + j] = left_strand; // bitset_top_strand_map[top_edge + j] = top_strand; // //} // // //template<class Input> //int 
prefix_lcs_via_braid_bits_2symbol_v2_full_mask(Input *a_reverse, int a_size, int a_total_symbols, // Input *b, int b_size, int b_total_symbols, int threads_num) { // // // Input *bitset_left_strand_map = static_cast<Input *> (aligned_alloc(sizeof(Input), sizeof(Input) * a_size)); // Input *bitset_top_strand_map = static_cast<Input *> (aligned_alloc(sizeof(Input), sizeof(Input) * b_size)); // // // auto m = a_size, n = b_size; // // int dis_braid = 0; // auto num_diag = m + n - 1; // auto total_same_length_diag = num_diag - (m - 1) - (m - 1); // // Input braid_ones = ~Input(0); // // //#pragma omp parallel num_threads(threads_num) default(none) shared(bitset_left_strand_map, bitset_top_strand_map, a_reverse, b, m, n, dis_braid, total_same_length_diag, braid_ones) // { // //#pragma omp for simd schedule(static) aligned(bitset_left_strand_map:sizeof(Input)*8) // for (int k = 0; k < n; ++k) { // bitset_top_strand_map[k] = Input(0); // } // //#pragma omp for simd schedule(static) aligned(bitset_left_strand_map:sizeof(Input)*8) // for (int k = 0; k < m; ++k) { // bitset_left_strand_map[k] = braid_ones; // } // // for (int diag_len = 0; diag_len < m - 1; diag_len++) { // process_cubes_antidiag_mpi_bin(0, diag_len + 1, m - 1 - diag_len, 0, bitset_left_strand_map, // bitset_top_strand_map, a_reverse, b); // // } // // for (int k = 0; k < total_same_length_diag; k++) { // process_cubes_antidiag_mpi_bin(0, m, 0, k, bitset_left_strand_map, // bitset_top_strand_map, a_reverse, b); // } // // auto start_j = total_same_length_diag; // // for (int diag_len = m - 1; diag_len >= 1; diag_len--) { // process_cubes_antidiag_mpi_bin(0, diag_len, 0, start_j, bitset_left_strand_map, // bitset_top_strand_map, a_reverse, b); // start_j++; // } // //#pragma omp for simd schedule(static) reduction(+:dis_braid) aligned(bitset_top_strand_map, bitset_left_strand_map, a_reverse, b:sizeof(Input)*8) // for (int i1 = 0; i1 < m; ++i1) { // // Brian Kernighan’s Algorithm // int counter = 0; // Input 
number = bitset_left_strand_map[i1]; // // LogNumber // while (number) { // number &= (number - 1); // counter++; // } // dis_braid += counter; // } // // } // // // free(bitset_left_strand_map); // free(bitset_top_strand_map); // // return a_total_symbols - dis_braid; // //} // // //#endif //CPU_2SYMBOL_NEW_H
GB_matlab_helper.c
//------------------------------------------------------------------------------
// GB_matlab_helper.c: helper functions for MATLAB interface
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// These functions are only used by the MATLAB interface for
// SuiteSparse:GraphBLAS.  All loops below are embarrassingly parallel and
// run with the thread count chosen by GB_NTHREADS.

#include "GB_matlab_helper.h"

// determine the number of threads to use, based on the work to be done
// and the global nthreads/chunk settings
#define GB_NTHREADS(work)                                       \
    int nthreads_max = GB_Global_nthreads_max_get ( ) ;         \
    double chunk = GB_Global_chunk_get ( ) ;                    \
    int nthreads = GB_nthreads (work, chunk, nthreads_max) ;

//------------------------------------------------------------------------------
// GB_matlab_helper1: convert 0-based indices to 1-based
//------------------------------------------------------------------------------

void GB_matlab_helper1              // convert zero-based indices to one-based
(
    double *restrict I_double,      // output array
    const GrB_Index *restrict I,    // input array
    int64_t nvals                   // size of input and output arrays
)
{

    GB_NTHREADS (nvals) ;

    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < nvals ; k++)
    {
        // MATLAB indices are 1-based doubles
        I_double [k] = (double) (I [k] + 1) ;
    }
}

//------------------------------------------------------------------------------
// GB_matlab_helper1i: convert 0-based indices to 1-based
//------------------------------------------------------------------------------

void GB_matlab_helper1i             // convert zero-based indices to one-based
(
    int64_t *restrict I,            // input/output array
    int64_t nvals                   // size of input/output array
)
{

    GB_NTHREADS (nvals) ;

    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < nvals ; k++)
    {
        I [k] ++ ;
    }
}

//------------------------------------------------------------------------------
// GB_matlab_helper2: create structure for dense matrix
//------------------------------------------------------------------------------

void GB_matlab_helper2              // fill Xp and Xi for a dense matrix
(
    GrB_Index *restrict Xp,         // size ncols+1
    GrB_Index *restrict Xi,         // size nrows*ncols
    int64_t ncols,
    int64_t nrows
)
{

    GB_NTHREADS (ncols) ;

    // column pointers of a dense matrix: Xp [j] = j*nrows
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t j = 0 ; j <= ncols ; j++)
    {
        Xp [j] = j * nrows ;
    }

    // recompute the thread count for the larger nrows*ncols workload
    double work = ((double) ncols) * ((double) nrows) ;
    nthreads = GB_nthreads (work, chunk, nthreads_max) ;

    // row indices of a dense matrix: 0..nrows-1 in every column
    #pragma omp parallel for num_threads(nthreads) schedule(static) \
        collapse(2)
    for (int64_t j = 0 ; j < ncols ; j++)
    {
        for (int64_t i = 0 ; i < nrows ; i++)
        {
            Xi [j * nrows + i] = i ;
        }
    }
}

//------------------------------------------------------------------------------
// GB_matlab_helper3: convert 1-based indices to 0-based
//------------------------------------------------------------------------------

bool GB_matlab_helper3              // return true if OK, false on error
(
    int64_t *restrict List,             // size len, output array
    const double *restrict List_double, // size len, input array
    int64_t len,
    int64_t *List_max                   // also compute the max entry in the
                                        // list (1-based)
)
{

    GB_NTHREADS (len) ;

    bool ok = true ;
    int64_t listmax = -1 ;

    #pragma omp parallel for num_threads(nthreads) schedule(static) \
        reduction(&&:ok) reduction(max:listmax)
    for (int64_t k = 0 ; k < len ; k++)
    {
        double x = List_double [k] ;
        int64_t i = (int64_t) x ;
        // fail if any entry is not an exact integer value
        ok = ok && (x == (double) i) ;
        listmax = GB_IMAX (listmax, i) ;
        List [k] = i - 1 ;
    }

    (*List_max) = listmax ;
    return (ok) ;
}

//------------------------------------------------------------------------------
// GB_matlab_helper3i: convert 1-based indices to 0-based
//------------------------------------------------------------------------------

void GB_matlab_helper3i
(
    int64_t *restrict List,             // size len, output array
    const int64_t *restrict List_int64, // size len, input array
    int64_t len,
    int64_t *List_max                   // also compute the max entry in the
                                        // list (1-based)
)
{

    GB_NTHREADS (len) ;

    int64_t listmax = -1 ;

    #pragma omp parallel for num_threads(nthreads) schedule(static) \
        reduction(max:listmax)
    for (int64_t k = 0 ; k < len ; k++)
    {
        int64_t i = List_int64 [k] ;
        listmax = GB_IMAX (listmax, i) ;
        List [k] = i - 1 ;
    }

    (*List_max) = listmax ;
}

//------------------------------------------------------------------------------
// GB_matlab_helper4: find the max entry in an index list
//------------------------------------------------------------------------------

int64_t GB_matlab_helper4           // find max (I) + 1
(
    const GrB_Index *restrict I,    // array of size len
    const int64_t len
)
{

    GB_NTHREADS (len) ;

    GrB_Index imax = 0 ;

    #pragma omp parallel for num_threads(nthreads) schedule(static) \
        reduction(max:imax)
    for (int64_t k = 0 ; k < len ; k++)
    {
        imax = GB_IMAX (imax, I [k]) ;
    }

    // result is max(I)+1 for a non-empty list, 0 otherwise
    if (len > 0) imax++ ;
    return (imax) ;
}

//------------------------------------------------------------------------------
// GB_matlab_helper5: construct pattern of S for gblogassign
//------------------------------------------------------------------------------

void GB_matlab_helper5              // construct pattern of S
(
    GrB_Index *restrict Si,         // array of size anz
    GrB_Index *restrict Sj,         // array of size anz
    const GrB_Index *restrict Mi,   // array of size mnz
    const GrB_Index *restrict Mj,   // array of size mnz
    GrB_Index *restrict Ai,         // array of size anz
    const GrB_Index anz
)
{

    GB_NTHREADS (anz) ;

    // gather (Si,Sj) = (Mi,Mj) at the positions selected by Ai
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        Si [k] = Mi [Ai [k]] ;
        Sj [k] = Mj [Ai [k]] ;
    }
}

//------------------------------------------------------------------------------
// GB_matlab_helper6: set bool array to all true gblogextract
//------------------------------------------------------------------------------

void GB_matlab_helper6              // set Gbool to all true
(
    bool *restrict Gbool,           // array of size gnvals
    const GrB_Index gnvals
)
{

    GB_NTHREADS (gnvals) ;

    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < gnvals ; k++)
    {
        Gbool [k] = true ;
    }
}

//------------------------------------------------------------------------------
// GB_matlab_helper7: Kx = uint64 (0:mnz-1), for gblogextract
//------------------------------------------------------------------------------

void GB_matlab_helper7              // Kx = uint64 (0:mnz-1)
(
    uint64_t *restrict Kx,          // array of size mnz
    const GrB_Index mnz
)
{

    GB_NTHREADS (mnz) ;

    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < mnz ; k++)
    {
        Kx [k] = k ;
    }
}

//------------------------------------------------------------------------------
// GB_matlab_helper8: expand a scalar into an array
//------------------------------------------------------------------------------

void GB_matlab_helper8
(
    GB_void *C,         // output array of size nvals * s
    GB_void *A,         // input scalar of size s
    GrB_Index nvals,    // size of C
    size_t s            // size of each scalar
)
{

    GB_NTHREADS (nvals) ;

    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < nvals ; k++)
    {
        // C [k] = A [0]
        memcpy (C + k * s, A, s) ;
    }
}
phonopy.c
/* Copyright (C) 2021 Atsushi Togo */
/* All rights reserved. */

/* This file is part of phonopy. */

/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */

/* * Redistributions of source code must retain the above copyright */
/*   notice, this list of conditions and the following disclaimer. */

/* * Redistributions in binary form must reproduce the above copyright */
/*   notice, this list of conditions and the following disclaimer in */
/*   the documentation and/or other materials provided with the */
/*   distribution. */

/* * Neither the name of the phonopy project nor the names of its */
/*   contributors may be used to endorse or promote products derived */
/*   from this software without specific prior written permission. */

/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */

#include "phonopy.h"

#include <float.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#include "derivative_dynmat.h"
#include "dynmat.h"
#include "rgrid.h"
#include "tetrahedron_method.h"

/* Boltzmann constant in eV/K. */
#define KB 8.6173382568083159E-05

static void set_index_permutation_symmetry_fc(double *fc, const int natom);
static void set_translational_symmetry_fc(double *fc, const int natom);
static void set_translational_symmetry_compact_fc(double *fc, const int p2s[],
                                                  const int n_satom,
                                                  const int n_patom);
static double get_free_energy(const double temperature, const double f);
static double get_entropy(const double temperature, const double f);
static double get_heat_capacity(const double temperature, const double f);
/* static double get_energy(double temperature, double f); */
static void distribute_fc2(double (*fc2)[3][3], const int *atom_list,
                           const int len_atom_list,
                           PHPYCONST double (*r_carts)[3][3],
                           const int *permutations, const int *map_atoms,
                           const int *map_syms, const int num_rot,
                           const int num_pos);
static int nint(const double a);

/* Thin wrapper: inverse-Fourier-transform a dynamical matrix back into
 * force constants (see dym_transform_dynmat_to_fc). */
void phpy_transform_dynmat_to_fc(double *fc, const double *dm,
                                 const double (*comm_points)[3],
                                 const double (*svecs)[3],
                                 const long (*multi)[2], const double *masses,
                                 const long *s2pp_map, const long *fc_index_map,
                                 const long num_patom, const long num_satom) {
    dym_transform_dynmat_to_fc(fc, dm, comm_points, svecs, multi, masses,
                               s2pp_map, fc_index_map, num_patom, num_satom);
}

/* Thin wrapper: build the dynamical matrix at a single q-point. */
long phpy_get_dynamical_matrix_at_q(double *dynamical_matrix,
                                    const long num_patom, const long num_satom,
                                    const double *fc, const double q[3],
                                    const double (*svecs)[3],
                                    const long (*multi)[2], const double *mass,
                                    const long *s2p_map, const long *p2s_map,
                                    const double (*charge_sum)[3][3],
                                    const long with_openmp) {
    /* BUG FIX: forward the caller's with_openmp flag.  The previous code
     * passed the literal 1, silently forcing the OpenMP code path on and
     * ignoring this parameter entirely. */
    return dym_get_dynamical_matrix_at_q(dynamical_matrix, num_patom, num_satom,
                                         fc, q, svecs, multi, mass, s2p_map,
                                         p2s_map, charge_sum, with_openmp);
}

/* Thin wrapper: accumulate the non-analytical-term charge sum. */
void phpy_get_charge_sum(
    double (*charge_sum)[3][3], const long num_patom,
    const double factor, /* 4pi/V*unit-conv and denominator */
    const double q_cart[3], const double (*born)[3][3]) {
    dym_get_charge_sum(charge_sum, num_patom, factor, q_cart, born);
}

/* Thin wrapper: reciprocal-space dipole-dipole (Ewald) contribution. */
void phpy_get_recip_dipole_dipole(
    double *dd,          /* [natom, 3, natom, 3, (real,imag)] */
    const double *dd_q0, /* [natom, 3, 3, (real,imag)] */
    const double (*G_list)[3], /* [num_G, 3] */
    const long num_G, const long num_patom, const double q_cart[3],
    const double *q_direction_cart, /* must be pointer */
    const double (*born)[3][3], const double dielectric[3][3],
    const double (*pos)[3], /* [num_patom, 3] */
    const double factor,    /* 4pi/V*unit-conv */
    const double lambda, const double tolerance) {
    dym_get_recip_dipole_dipole(dd, dd_q0, G_list, num_G, num_patom, q_cart,
                                q_direction_cart, born, dielectric, pos, factor,
                                lambda, tolerance);
}

/* Thin wrapper: q=0 limit of the dipole-dipole contribution. */
void phpy_get_recip_dipole_dipole_q0(
    double *dd_q0,             /* [natom, 3, 3, (real,imag)] */
    const double (*G_list)[3], /* [num_G, 3] */
    const long num_G, const long num_patom, const double (*born)[3][3],
    const double dielectric[3][3], const double (*pos)[3], /* [num_patom, 3] */
    const double lambda, const double tolerance) {
    dym_get_recip_dipole_dipole_q0(dd_q0, G_list, num_G, num_patom, born,
                                   dielectric, pos, lambda, tolerance);
}

/* Thin wrapper: derivative of the dynamical matrix with respect to q. */
void phpy_get_derivative_dynmat_at_q(
    double *derivative_dynmat, const long num_patom, const long num_satom,
    const double *fc, const double *q,
    const double *lattice, /* column vector */
    const double (*svecs)[3], const long (*multi)[2], const double *mass,
    const long *s2p_map, const long *p2s_map, const double nac_factor,
    const double *born, const double *dielectric, const double *q_direction) {
    ddm_get_derivative_dynmat_at_q(derivative_dynmat, num_patom, num_satom, fc,
                                   q, lattice, svecs, multi, mass, s2p_map,
                                   p2s_map, nac_factor, born, dielectric,
                                   q_direction);
}

/* Thin wrapper: 24 tetrahedra x 4 vertices of relative grid addresses. */
void phpy_get_relative_grid_address(long relative_grid_address[24][4][3],
                                    PHPYCONST double reciprocal_lattice[3][3]) {
    thm_get_relative_grid_address(relative_grid_address, reciprocal_lattice);
}

/* Thin wrapper: relative grid addresses for all 4 main diagonals. */
void phpy_get_all_relative_grid_address(
    long relative_grid_address[4][24][4][3]) {
    thm_get_all_relative_grid_address(relative_grid_address);
}

/* Thin wrapper: tetrahedron-method integration weight at frequency omega.
 * function is 'I' (imaginary/delta) or 'J' style selector, passed through. */
double phpy_get_integration_weight(const double omega,
                                   PHPYCONST double tetrahedra_omegas[24][4],
                                   const char function) {
    return thm_get_integration_weight(omega, tetrahedra_omegas, function);
}

/* Collect, for each grid point, the frequencies at the 96 neighboring
 * tetrahedron vertices for every band, laid out as
 * freq_tetras[num_gp][num_band][96]. */
void phpy_get_tetrahedra_frequenies(double *freq_tetras, const long mesh[3],
                                    const long *grid_points,
                                    PHPYCONST long (*grid_address)[3],
                                    PHPYCONST long (*relative_grid_address)[3],
                                    const long *gp_ir_index,
                                    const double *frequencies,
                                    const long num_band, const long num_gp) {
    long is_shift[3] = {0, 0, 0};
    long i, j, k, gp;
    long g_addr[3];
    long address_double[3];

    /* relative_grid_address[4, 24, 3] is viewed as [96, 3]. */
    for (i = 0; i < num_gp; i++) {
#pragma omp parallel for private(k, g_addr, gp, address_double)
        for (j = 0; j < num_band * 96; j++) {
            for (k = 0; k < 3; k++) {
                g_addr[k] = grid_address[grid_points[i]][k] +
                            relative_grid_address[j % 96][k];
            }
            rgd_get_double_grid_address(address_double, g_addr, mesh, is_shift);
            gp = rgd_get_double_grid_index(address_double, mesh);
            freq_tetras[i * num_band * 96 + j] =
                frequencies[gp_ir_index[gp] * num_band + j / 96];
        }
    }
}

/* Tetrahedron-method DOS accumulated per irreducible grid point, band,
 * frequency point, and coefficient.  Allocates scratch maps from the full
 * grid to the irreducible grid. */
void phpy_tetrahedron_method_dos(
    double *dos, const long mesh[3], PHPYCONST long (*grid_address)[3],
    PHPYCONST long (*relative_grid_address)[4][3],
    const long *grid_mapping_table, const double *freq_points,
    const double *frequencies, const double *coef, const long num_freq_points,
    const long num_ir_gp, const long num_band, const long num_coef,
    const long num_gp) {
    long is_shift[3] = {0, 0, 0};
    long i, j, k, l, m, q, r, count;
    long ir_gps[24][4];
    long g_addr[3];
    double tetrahedra[24][4];
    long address_double[3];
    long *gp2ir, *ir_grid_points;
    long *weights;
    double iw;

    gp2ir = NULL;
    ir_grid_points = NULL;
    weights = NULL;

    gp2ir = (long *)malloc(sizeof(long) * num_gp);
    ir_grid_points = (long *)malloc(sizeof(long) * num_ir_gp);
    weights = (long *)malloc(sizeof(long) * num_ir_gp);

    /* Build full-grid -> irreducible-grid map and multiplicities. */
    count = 0;
    for (i = 0; i < num_gp; i++) {
        if (grid_mapping_table[i] == i) {
            gp2ir[i] = count;
            ir_grid_points[count] = i;
            weights[count] = 1;
            count++;
        } else {
            gp2ir[i] = gp2ir[grid_mapping_table[i]];
            weights[gp2ir[i]]++;
        }
    }

    if (num_ir_gp != count) {
        printf("Something is wrong!\n");
    }

#pragma omp parallel for private(j, k, l, m, q, r, iw, ir_gps, g_addr, \
                                     tetrahedra, address_double)
    for (i = 0; i < num_ir_gp; i++) {
        /* set 24 tetrahedra */
        for (l = 0; l < 24; l++) {
            for (q = 0; q < 4; q++) {
                for (r = 0; r < 3; r++) {
                    g_addr[r] = grid_address[ir_grid_points[i]][r] +
                                relative_grid_address[l][q][r];
                }
                rgd_get_double_grid_address(address_double, g_addr, mesh,
                                            is_shift);
                ir_gps[l][q] =
                    gp2ir[rgd_get_double_grid_index(address_double, mesh)];
            }
        }

        for (k = 0; k < num_band; k++) {
            for (l = 0; l < 24; l++) {
                for (q = 0; q < 4; q++) {
                    tetrahedra[l][q] = frequencies[ir_gps[l][q] * num_band + k];
                }
            }
            for (j = 0; j < num_freq_points; j++) {
                iw = thm_get_integration_weight(freq_points[j], tetrahedra,
                                                'I') *
                     weights[i];
                for (m = 0; m < num_coef; m++) {
                    dos[i * num_band * num_freq_points * num_coef +
                        k * num_coef * num_freq_points + j * num_coef + m] +=
                        iw * coef[i * num_coef * num_band + m * num_band + k];
                }
            }
        }
    }

    free(gp2ir);
    gp2ir = NULL;
    free(ir_grid_points);
    ir_grid_points = NULL;
    free(weights);
    weights = NULL;
}

/* Sum free energy, entropy, and heat capacity over q-points and bands.
 * thermal_props has layout [num_temp][3] and is accumulated into (not
 * cleared) by this function; bands at or below cutoff_frequency and
 * non-positive temperatures are skipped. */
void phpy_get_thermal_properties(double *thermal_props,
                                 const double *temperatures,
                                 const double *freqs, const long *weights,
                                 const long num_temp, const long num_qpoints,
                                 const long num_bands,
                                 const double cutoff_frequency) {
    long i, j, k;
    double f;
    double *tp;

    /* Per-q-point scratch so the OpenMP loop writes disjoint slices. */
    tp = (double *)malloc(sizeof(double) * num_qpoints * num_temp * 3);
    for (i = 0; i < num_qpoints * num_temp * 3; i++) {
        tp[i] = 0;
    }

#pragma omp parallel for private(j, k, f)
    for (i = 0; i < num_qpoints; i++) {
        for (j = 0; j < num_temp; j++) {
            for (k = 0; k < num_bands; k++) {
                f = freqs[i * num_bands + k];
                if (temperatures[j] > 0 && f > cutoff_frequency) {
                    tp[i * num_temp * 3 + j * 3] +=
                        get_free_energy(temperatures[j], f) * weights[i];
                    tp[i * num_temp * 3 + j * 3 + 1] +=
                        get_entropy(temperatures[j], f) * weights[i];
                    tp[i * num_temp * 3 + j * 3 + 2] +=
                        get_heat_capacity(temperatures[j], f) * weights[i];
                }
            }
        }
    }

    for (i = 0; i < num_qpoints; i++) {
        for (j = 0; j < num_temp * 3; j++) {
            thermal_props[j] += tp[i * num_temp * 3 + j];
        }
    }

    free(tp);
    tp = NULL;
}

/* Public wrapper around the static distribute_fc2 below. */
void phpy_distribute_fc2(double (*fc2)[3][3], const int *atom_list,
                         const int len_atom_list,
                         PHPYCONST double (*r_carts)[3][3],
                         const int *permutations, const int *map_atoms,
                         const int *map_syms, const int num_rot,
                         const int num_pos) {
    distribute_fc2(fc2, atom_list, len_atom_list, r_carts, permutations,
                   map_atoms, map_syms, num_rot, num_pos);
}

/* Find the permutation that maps rot_pos onto pos within symprec (distances
 * measured in Cartesian via lat, with periodic wrapping).  rot_atom[j] = i
 * means pos[i] matches rot_pos[j].  Returns 1 on success, 0 if any atom is
 * unmatched. */
int phpy_compute_permutation(int *rot_atom, PHPYCONST double lat[3][3],
                             PHPYCONST double (*pos)[3],
                             PHPYCONST double (*rot_pos)[3], const int num_pos,
                             const double symprec) {
    int i, j, k, l;
    int search_start;
    double distance2, diff_cart;
    double diff[3];

    for (i = 0; i < num_pos; i++) {
        rot_atom[i] = -1;
    }

    /* optimization: Iterate primarily by pos instead of rot_pos. */
    /* (find where 0 belongs in rot_atom, then where 1 belongs, etc.) */
    /* Then track the first unassigned index. */
    /*                                                              */
    /* This works best if the permutation is close to the identity. */
    /* (more specifically, if the max value of 'rot_atom[i] - i' is small) */
    search_start = 0;
    for (i = 0; i < num_pos; i++) {
        while (rot_atom[search_start] >= 0) {
            search_start++;
        }
        for (j = search_start; j < num_pos; j++) {
            if (rot_atom[j] >= 0) {
                continue;
            }
            for (k = 0; k < 3; k++) {
                diff[k] = pos[i][k] - rot_pos[j][k];
                diff[k] -= nint(diff[k]);
            }
            distance2 = 0;
            for (k = 0; k < 3; k++) {
                diff_cart = 0;
                for (l = 0; l < 3; l++) {
                    diff_cart += lat[k][l] * diff[l];
                }
                distance2 += diff_cart * diff_cart;
            }
            if (sqrt(distance2) < symprec) {
                rot_atom[j] = i;
                break;
            }
        }
    }

    for (i = 0; i < num_pos; i++) {
        if (rot_atom[i] < 0) {
            return 0;
        }
    }
    return 1;
}

/* For every (to, from) atom pair, find the shortest equivalent vectors among
 * the candidate lattice points (within symprec of the minimum length) and
 * store up to 27 of them in supercell coordinates. */
void phpy_set_smallest_vectors_sparse(
    double (*smallest_vectors)[27][3], int *multiplicity,
    PHPYCONST double (*pos_to)[3], const int num_pos_to,
    PHPYCONST double (*pos_from)[3], const int num_pos_from,
    PHPYCONST int (*lattice_points)[3], const int num_lattice_points,
    PHPYCONST double reduced_basis[3][3], PHPYCONST int trans_mat[3][3],
    const double symprec) {
    int i, j, k, l, count;
    double length_tmp, minimum, vec_xyz;
    double *length;
    double(*vec)[3];

    length = (double *)malloc(sizeof(double) * num_lattice_points);
    vec = (double(*)[3])malloc(sizeof(double[3]) * num_lattice_points);

    for (i = 0; i < num_pos_to; i++) {
        for (j = 0; j < num_pos_from; j++) {
            /* Lengths of all candidate vectors in the reduced basis. */
            for (k = 0; k < num_lattice_points; k++) {
                length[k] = 0;
                for (l = 0; l < 3; l++) {
                    vec[k][l] =
                        pos_to[i][l] - pos_from[j][l] + lattice_points[k][l];
                }
                for (l = 0; l < 3; l++) {
                    length_tmp = (reduced_basis[l][0] * vec[k][0] +
                                  reduced_basis[l][1] * vec[k][1] +
                                  reduced_basis[l][2] * vec[k][2]);
                    length[k] += length_tmp * length_tmp;
                }
                length[k] = sqrt(length[k]);
            }

            minimum = DBL_MAX;
            for (k = 0; k < num_lattice_points; k++) {
                if (length[k] < minimum) {
                    minimum = length[k];
                }
            }

            count = 0;
            for (k = 0; k < num_lattice_points; k++) {
                if (length[k] - minimum < symprec) {
                    for (l = 0; l < 3; l++) {
                        /* Transform back to supercell coordinates */
                        vec_xyz = (trans_mat[l][0] * vec[k][0] +
                                   trans_mat[l][1] * vec[k][1] +
                                   trans_mat[l][2] * vec[k][2]);
                        smallest_vectors[i * num_pos_from + j][count][l] =
                            vec_xyz;
                    }
                    count++;
                }
            }
            if (count > 27) { /* should not be greater than 27 */
                printf("Warning (gsv_set_smallest_vectors_sparse): ");
                printf("number of shortest vectors is out of range,\n");
                break;
            } else {
                multiplicity[i * num_pos_from + j] = count;
            }
        }
    }

    free(length);
    length = NULL;
    free(vec);
    vec = NULL;
}

/* Dense-layout variant of the above.  When initialize is non-zero only
 * multiplicity[(i,j)] = {count, offset} is written; otherwise the vectors
 * themselves are stored contiguously at the running offset adrs. */
void phpy_set_smallest_vectors_dense(
    double (*smallest_vectors)[3], long (*multiplicity)[2],
    PHPYCONST double (*pos_to)[3], const long num_pos_to,
    PHPYCONST double (*pos_from)[3], const long num_pos_from,
    PHPYCONST long (*lattice_points)[3], const long num_lattice_points,
    PHPYCONST double reduced_basis[3][3], PHPYCONST long trans_mat[3][3],
    const long initialize, const double symprec) {
    long i, j, k, l, count, adrs;
    double length_tmp, minimum, vec_xyz;
    double *length;
    double(*vec)[3];

    length = (double *)malloc(sizeof(double) * num_lattice_points);
    vec = (double(*)[3])malloc(sizeof(double[3]) * num_lattice_points);
    adrs = 0;

    for (i = 0; i < num_pos_to; i++) {
        for (j = 0; j < num_pos_from; j++) {
            for (k = 0; k < num_lattice_points; k++) {
                length[k] = 0;
                for (l = 0; l < 3; l++) {
                    vec[k][l] =
                        pos_to[i][l] - pos_from[j][l] + lattice_points[k][l];
                }
                for (l = 0; l < 3; l++) {
                    length_tmp = (reduced_basis[l][0] * vec[k][0] +
                                  reduced_basis[l][1] * vec[k][1] +
                                  reduced_basis[l][2] * vec[k][2]);
                    length[k] += length_tmp * length_tmp;
                }
                length[k] = sqrt(length[k]);
            }

            minimum = DBL_MAX;
            for (k = 0; k < num_lattice_points; k++) {
                if (length[k] < minimum) {
                    minimum = length[k];
                }
            }

            count = 0;
            for (k = 0; k < num_lattice_points; k++) {
                if (length[k] - minimum < symprec) {
                    if (!initialize) {
                        for (l = 0; l < 3; l++) {
                            /* Transform back to supercell coordinates */
                            vec_xyz = (trans_mat[l][0] * vec[k][0] +
                                       trans_mat[l][1] * vec[k][1] +
                                       trans_mat[l][2] * vec[k][2]);
                            smallest_vectors[adrs + count][l] = vec_xyz;
                        }
                    }
                    count++;
                }
            }
            if (initialize) {
                multiplicity[i * num_pos_from + j][0] = count;
                multiplicity[i * num_pos_from + j][1] = adrs;
            }
            adrs += count;
        }
    }

    free(length);
    length = NULL;
    free(vec);
    vec = NULL;
}

/* Iteratively impose index-permutation and translational symmetry on a full
 * (n_satom x n_satom x 3 x 3) force-constant array, `level` times. */
void phpy_perm_trans_symmetrize_fc(double *fc, const int n_satom,
                                   const int level) {
    int i, j, k, l, iter;
    double sum;

    for (iter = 0; iter < level; iter++) {
        /* Subtract drift along column */
        for (j = 0; j < n_satom; j++) {
            for (k = 0; k < 3; k++) {
                for (l = 0; l < 3; l++) {
                    sum = 0;
                    for (i = 0; i < n_satom; i++) {
                        sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l];
                    }
                    sum /= n_satom;
                    for (i = 0; i < n_satom; i++) {
                        fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum;
                    }
                }
            }
        }
        /* Subtract drift along row */
        for (i = 0; i < n_satom; i++) {
            for (k = 0; k < 3; k++) {
                for (l = 0; l < 3; l++) {
                    sum = 0;
                    for (j = 0; j < n_satom; j++) {
                        sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l];
                    }
                    sum /= n_satom;
                    for (j = 0; j < n_satom; j++) {
                        fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum;
                    }
                }
            }
        }
        set_index_permutation_symmetry_fc(fc, n_satom);
    }
    set_translational_symmetry_fc(fc, n_satom);
}

/* Same as above for the compact (n_patom x n_satom) force-constant format;
 * permutation symmetry is applied through the symmetry maps. */
void phpy_perm_trans_symmetrize_compact_fc(double *fc, const int p2s[],
                                           const int s2pp[],
                                           const int nsym_list[],
                                           const int perms[], const int n_satom,
                                           const int n_patom, const int level) {
    int i, j, k, l, n, iter;
    double sum;

    for (iter = 0; iter < level; iter++) {
        for (n = 0; n < 2; n++) {
            /* transpose only */
            phpy_set_index_permutation_symmetry_compact_fc(
                fc, p2s, s2pp, nsym_list, perms, n_satom, n_patom, 1);
            for (i = 0; i < n_patom; i++) {
                for (k = 0; k < 3; k++) {
                    for (l = 0; l < 3; l++) {
                        sum = 0;
                        for (j = 0; j < n_satom; j++) {
                            sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l];
                        }
                        sum /= n_satom;
                        for (j = 0; j < n_satom; j++) {
                            fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum;
                        }
                    }
                }
            }
        }
        phpy_set_index_permutation_symmetry_compact_fc(
            fc, p2s, s2pp, nsym_list, perms, n_satom, n_patom, 0);
    }
    set_translational_symmetry_compact_fc(fc, p2s, n_satom, n_patom);
}

/* Apply (is_transpose == 0) or transpose for (is_transpose != 0) the index
 * permutation symmetry fc[i,j] = fc[j,i]^T in the compact format, pairing
 * each (i_p, j) element with its symmetry-translated partner. */
void phpy_set_index_permutation_symmetry_compact_fc(
    double *fc, const int p2s[], const int s2pp[], const int nsym_list[],
    const int perms[], const int n_satom, const int n_patom,
    const int is_transpose) {
    int i, j, k, l, m, n, i_p, j_p, i_trans;
    double fc_elem;
    char *done;

    done = NULL;
    done = (char *)malloc(sizeof(char) * n_satom * n_patom);
    for (i = 0; i < n_satom * n_patom; i++) {
        done[i] = 0;
    }

    for (j = 0; j < n_satom; j++) {
        j_p = s2pp[j];
        for (i_p = 0; i_p < n_patom; i_p++) {
            i = p2s[i_p];
            if (i == j) { /* diagonal part */
                for (k = 0; k < 3; k++) {
                    for (l = 0; l < 3; l++) {
                        if (l > k) {
                            m = i_p * n_satom * 9 + i * 9 + k * 3 + l;
                            n = i_p * n_satom * 9 + i * 9 + l * 3 + k;
                            if (is_transpose) {
                                fc_elem = fc[m];
                                fc[m] = fc[n];
                                fc[n] = fc_elem;
                            } else {
                                fc[m] = (fc[m] + fc[n]) / 2;
                                fc[n] = fc[m];
                            }
                        }
                    }
                }
            }

            if (!done[i_p * n_satom + j]) {
                /* (j, i) -- nsym_list[j] --> (j', i') */
                /* nsym_list[j] translates j to j' where j' is in */
                /* primitive cell. The same translation sends i to i' */
                /* where i' is not necessarily to be in primitive cell. */
                /* Thus, i' = perms[nsym_list[j] * n_satom + i] */
                i_trans = perms[nsym_list[j] * n_satom + i];
                done[i_p * n_satom + j] = 1;
                done[j_p * n_satom + i_trans] = 1;
                for (k = 0; k < 3; k++) {
                    for (l = 0; l < 3; l++) {
                        m = i_p * n_satom * 9 + j * 9 + k * 3 + l;
                        n = j_p * n_satom * 9 + i_trans * 9 + l * 3 + k;
                        if (is_transpose) {
                            fc_elem = fc[m];
                            fc[m] = fc[n];
                            fc[n] = fc_elem;
                        } else {
                            fc[m] = (fc[n] + fc[m]) / 2;
                            fc[n] = fc[m];
                        }
                    }
                }
            }
        }
    }

    free(done);
    done = NULL;
}

/* Symmetrize fc[i,j] = fc[j,i]^T by averaging, full-format array. */
static void set_index_permutation_symmetry_fc(double *fc, const int natom) {
    int i, j, k, l, m, n;

    for (i = 0; i < natom; i++) {
        /* non diagonal part */
        for (j = i + 1; j < natom; j++) {
            for (k = 0; k < 3; k++) {
                for (l = 0; l < 3; l++) {
                    m = i * natom * 9 + j * 9 + k * 3 + l;
                    n = j * natom * 9 + i * 9 + l * 3 + k;
                    fc[m] += fc[n];
                    fc[m] /= 2;
                    fc[n] = fc[m];
                }
            }
        }
        /* diagonal part */
        for (k = 0; k < 2; k++) {
            for (l = k + 1; l < 3; l++) {
                m = i * natom * 9 + i * 9 + k * 3 + l;
                n = i * natom * 9 + i * 9 + l * 3 + k;
                fc[m] += fc[n];
                fc[m] /= 2;
                fc[n] = fc[m];
            }
        }
    }
}

/* Enforce the acoustic sum rule: set each on-site 3x3 block so rows/columns
 * of force constants sum to zero, full-format array. */
static void set_translational_symmetry_fc(double *fc, const int natom) {
    int i, j, k, l, m;
    double sums[3][3];

    for (i = 0; i < natom; i++) {
        for (k = 0; k < 3; k++) {
            for (l = 0; l < 3; l++) {
                sums[k][l] = 0;
                m = i * natom * 9 + k * 3 + l;
                for (j = 0; j < natom; j++) {
                    if (i != j) {
                        sums[k][l] += fc[m];
                    }
                    m += 9;
                }
            }
        }
        for (k = 0; k < 3; k++) {
            for (l = 0; l < 3; l++) {
                fc[i * natom * 9 + i * 9 + k * 3 + l] =
                    -(sums[k][l] + sums[l][k]) / 2;
            }
        }
    }
}

/* Acoustic sum rule for the compact force-constant format. */
static void set_translational_symmetry_compact_fc(double *fc, const int p2s[],
                                                  const int n_satom,
                                                  const int n_patom) {
    int j, k, l, m, i_p;
    double sums[3][3];

    for (i_p = 0; i_p < n_patom; i_p++) {
        for (k = 0; k < 3; k++) {
            for (l = 0; l < 3; l++) {
                sums[k][l] = 0;
                m = i_p * n_satom * 9 + k * 3 + l;
                for (j = 0; j < n_satom; j++) {
                    if (p2s[i_p] != j) {
                        sums[k][l] += fc[m];
                    }
                    m += 9;
                }
            }
        }
        for (k = 0; k < 3; k++) {
            for (l = 0; l < 3; l++) {
                fc[i_p * n_satom * 9 + p2s[i_p] * 9 + k * 3 + l] =
                    -(sums[k][l] + sums[l][k]) / 2;
            }
        }
    }
}

static double get_free_energy(const double temperature, const double f) {
    /* temperature is defined by T (K) */
    /* 'f' must be given in eV. */
    return KB * temperature * log(1 - exp(-f / (KB * temperature)));
}

static double get_entropy(const double temperature, const double f) {
    /* temperature is defined by T (K) */
    /* 'f' must be given in eV. */
    double val;

    val = f / (2 * KB * temperature);
    return 1 / (2 * temperature) * f * cosh(val) / sinh(val) -
           KB * log(2 * sinh(val));
}

static double get_heat_capacity(const double temperature, const double f) {
    /* temperature is defined by T (K) */
    /* 'f' must be given in eV. */
    /* If val is close to 1. Then expansion is used. */
    double val, val1, val2;

    val = f / (KB * temperature);
    val1 = exp(val);
    val2 = (val) / (val1 - 1);
    return KB * val1 * val2 * val2;
}

/* Fill in fc2 rows of atoms not in the symmetry-reduced set by rotating the
 * rows of their symmetry-equivalent "done" atoms: P' = R^-1 P R. */
static void distribute_fc2(double (*fc2)[3][3], /* shape[n_pos][n_pos] */
                           const int *atom_list, const int len_atom_list,
                           PHPYCONST double (*r_carts)[3][3], /* shape[n_rot] */
                           const int *permutations, /* shape[n_rot][n_pos] */
                           const int *map_atoms,    /* shape [n_pos] */
                           const int *map_syms,     /* shape [n_pos] */
                           const int num_rot, const int num_pos) {
    int i, j, k, l, m;
    int atom_todo, atom_done, atom_other;
    int sym_index;
    int *atom_list_reverse;
    double(*fc2_done)[3];
    double(*fc2_todo)[3];
    double(*r_cart)[3];
    const int *permutation;

    atom_list_reverse = NULL;
    atom_list_reverse = (int *)malloc(sizeof(int) * num_pos);
    /* atom_list_reverse[!atom_done] is undefined. */
    for (i = 0; i < len_atom_list; i++) {
        atom_done = map_atoms[atom_list[i]];
        if (atom_done == atom_list[i]) {
            atom_list_reverse[atom_done] = i;
        }
    }

    for (i = 0; i < len_atom_list; i++) {
        /* look up how this atom maps into the done list. */
        atom_todo = atom_list[i];
        atom_done = map_atoms[atom_todo];
        sym_index = map_syms[atom_todo];

        /* skip the atoms in the done list, */
        /* which are easily identified because they map to themselves. */
        if (atom_todo == atom_done) {
            continue;
        }

        /* look up information about the rotation */
        r_cart = r_carts[sym_index];
        permutation = &permutations[sym_index * num_pos]; /* shape[num_pos] */

        /* distribute terms from atom_done to atom_todo */
        for (atom_other = 0; atom_other < num_pos; atom_other++) {
            fc2_done = fc2[atom_list_reverse[atom_done] * num_pos +
                           permutation[atom_other]];
            fc2_todo = fc2[i * num_pos + atom_other];
            for (j = 0; j < 3; j++) {
                for (k = 0; k < 3; k++) {
                    for (l = 0; l < 3; l++) {
                        for (m = 0; m < 3; m++) {
                            /* P' = R^-1 P R */
                            fc2_todo[j][k] +=
                                r_cart[l][j] * r_cart[m][k] * fc2_done[l][m];
                        }
                    }
                }
            }
        }
    }

    free(atom_list_reverse);
    atom_list_reverse = NULL;
}

/* static double get_energy(double temperature, double f){ */
/*   /\* temperature is defined by T (K) *\/ */
/*   /\* 'f' must be given in eV. *\/ */
/*   return f / (exp(f / (KB * temperature)) - 1); */
/* } */

/* Round to nearest integer, halves away from zero. */
static int nint(const double a) {
    if (a < 0.0)
        return (int)(a - 0.5);
    else
        return (int)(a + 0.5);
}
pr59152.c
/* PR middle-end/59152 */
/* { dg-do compile } */
/* { dg-options "-fopenmp -fipa-pure-const" } */

/* GCC regression test: compile-only check that OpenMP collapse(3) loop
   nests with unsigned long induction variables (including one counting
   down from __LONG_MAX__, which exercises iteration-count computation
   near the type's wraparound) do not ICE under -fipa-pure-const, for
   chunked static, plain static, and runtime schedules. */

extern int b[];

void
foo (void)
{
  unsigned long v1, v2, v3;
  /* chunked static schedule */
#pragma omp parallel for schedule(static, 32) collapse(3)
  for (v1 = 0; v1 < 20; v1 += 2)
    for (v2 = __LONG_MAX__; v2 > __LONG_MAX__ - 30; v2 -= 3)
      for (v3 = 10; v3 > 0; v3--)
#pragma omp atomic
        b[v3]++;
}

void
bar (void)
{
  unsigned long v1, v2, v3;
  /* default (unchunked) static schedule */
#pragma omp parallel for schedule(static) collapse(3)
  for (v1 = 0; v1 < 20; v1 += 2)
    for (v2 = __LONG_MAX__; v2 > __LONG_MAX__ - 30; v2 -= 3)
      for (v3 = 10; v3 > 0; v3--)
#pragma omp atomic
        b[v3]++;
}

void
baz (void)
{
  unsigned long v1, v2, v3;
  /* schedule chosen at runtime */
#pragma omp parallel for schedule(runtime) collapse(3)
  for (v1 = 0; v1 < 20; v1 += 2)
    for (v2 = __LONG_MAX__; v2 > __LONG_MAX__ - 30; v2 -= 3)
      for (v3 = 10; v3 > 0; v3--)
#pragma omp atomic
        b[v3]++;
}
ompsimpletest.c
#include <nautilus/nautilus.h>
#include <nautilus/shell.h>
#include <nautilus/libccompat.h>

#define ERROR(fmt, args...) ERROR_PRINT("ompstest: " fmt, ##args)
#define DEBUG(fmt, args...) DEBUG_PRINT("ompstest: " fmt, ##args)
#define INFO(fmt, args...)  INFO_PRINT("ompstest: " fmt, ##args)

/* Shell command handler: smoke-test OpenMP support by running an 8-iteration
 * statically scheduled (chunk 1) parallel loop and printing which thread
 * (via getpid) executes each iteration.  buf/priv are the standard shell
 * handler arguments and are unused here.  Always returns 0. */
static int
handle_test (char * buf, void * priv)
{
    int i;

#pragma omp parallel
    {
#pragma omp for private(i) schedule(static, 1)
        for (i = 0; i < 8; i++) {
            long id = getpid();
            /* BUG FIX: id is a long, so the conversion specifier must be
             * %ld; the previous %d had mismatched format/argument types,
             * which is undefined behavior. */
            nk_vc_printf("****omptest==thread id %ld\n", id);
            nk_vc_printf("*****working %d\n", i);
        }
    }

    return 0;
}

static struct shell_cmd_impl omptest_impl = {
    .cmd      = "ompstest",
    .help_str = "ompstest (openmp simple test)",
    .handler  = handle_test,
};
nk_register_shell_cmd(omptest_impl);
mini.c
/* mini.c -- toy arithmetic in GF(p)[i] (p = 431) for elliptic-curve /
 * SIDH-style experiments: j-invariant computation and a brute-force search
 * for curve coefficients sharing a given j-invariant.
 *
 * Fixes in this revision: printf conversion specifiers matched to their
 * argument types (%lld for long long, %u for unsigned -- the original %d
 * was undefined behaviour), removal of a read of the uninitialised 'a1'
 * in cinv(), zero-initialisation of the stub return in eadd2(), and
 * removal of unused locals (including a local 'pp' that shadowed the
 * global).  All arithmetic and output logic is otherwise unchanged. */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <omp.h>

/* element of GF(p)[i]: re + im*i; components held loosely reduced in
 * 64-bit signed integers */
typedef struct {
    long long int re;
    long long int im;
} com;

/* affine point on a curve */
typedef struct {
    com x;
    com y;
} PO;

/* SIDH public-parameter bundle (currently unused) */
typedef struct {
    unsigned int p;
    unsigned int e2;
    unsigned int e3;
    unsigned int xQ20; unsigned int xQ21;
    unsigned int yQ20; unsigned int yQ21;
    unsigned int xP20; unsigned int xP21;
    unsigned int yP20; unsigned int yP21;
    unsigned int xR20; unsigned int xR21;
    unsigned int xQ30; unsigned int xQ31;
    unsigned int yQ30; unsigned int yQ31;
    unsigned int xP30; unsigned int xP31;
    unsigned int yP30; unsigned int yP31;
    unsigned int xR30; unsigned int xR31;
    unsigned int n;
} SIDH;

/* (unused) descriptor with a flexible array member */
typedef struct {
    int n;
    int p;
    int q;
    char s[];
} tor;

/* (unused) curve/point bundle */
typedef struct {
    unsigned int p;
    unsigned int e2;
    unsigned int e3;
    PO P2; PO P3;
    PO Q2; PO Q3;
    PO R2; PO R3;
    unsigned int n;
} CM;

unsigned int p = 431;      /* field characteristic */
unsigned int pp = 185761;  /* p^2 */
// SIDH sp434;

/* Modular inverse of a mod n via the extended Euclidean algorithm.
 * NOTE(review): returns ((x + n) % (n / d)) where d = gcd(a, n); for
 * d == 1 this is the usual inverse mod n, otherwise the result is reduced
 * mod n/d -- confirm that callers only ever pass units. */
long long int inv(long long int a, long long int n) {
    long long int d, x, s, q, r, t;
    d = n;
    x = 0;
    s = 1;
    while (a != 0) {
        q = d / a;
        r = d % a;
        d = a;
        a = r;
        t = x - q * s;
        x = s;
        s = t;
    }
    // gcd = d; // $\gcd(a, n)$
    return ((x + n) % (n / d));
}

// SIDH
/* addition in GF(p)[i]; each component partially reduced into [0, p) */
com cadd(com a, com b) {
    com c;
    c.re = (a.re + b.re);
    if (c.re > p) c.re = c.re % p;
    if (c.re < 0) c.re += p;
    c.im = (a.im + b.im);
    if (c.im > p) c.im = c.im % p;
    if (c.im < 0) c.im = c.im + p;
    return c;
}

/* negation -a (componentwise multiply by -1, partially reduced) */
com inv_add(com a) {
    com c;
    c.re = -1;
    c.im = -1;
    c.re = c.re * a.re % p;
    if (c.re > p) c.re %= p;
    c.im = c.im * a.im % p;
    if (c.im > p) c.im %= p;
    return c;
}

/* subtraction in GF(p)[i]; negative components shifted up by p once */
com csub(com a, com b) {
    com c;
    c.re = (a.re - b.re);
    if (c.re < 0) c.re += p;
    c.im = (a.im - b.im);
    if (c.im < 0) c.im += p;
    return c;
}

/* multiplication in GF(p)[i] with i^2 = -1; result NOT reduced mod p */
com cmul(com a, com b) {
    com c;
    long long int d, e;
    c.re = a.re * b.re - (a.im * b.im);
    d = (a.re * b.im); //%p;
    e = (b.re * a.im); //%p;
    // c.re=c.re+c.im;//%p;
    c.im = d + e; //%p;
    return c;
}

/* Multiplicative inverse of a in GF(p)[i] by exhaustive O(p^2) search;
 * prints the inverse found as a debug side effect.  If no inverse exists
 * (a not a unit) the last candidate tried, (p-1) + (p-1)i, is returned. */
com cinv(com a) {
    com a2, b1;
    unsigned int l, n;
    for (l = 0; l < p; l++) {
        //#pragma omp parallel for
        for (n = 0; n < p; n++) {
            a2.re = l;
            a2.im = n;
            b1 = cmul(a2, a);
            if (b1.re % p == 1 && b1.im % p == 0) {
                /* fixed: was %d with long long arguments (UB) and printed
                 * the never-initialised 'a1' -- print the inverse a2 */
                printf("%lld %lld %lld %lld\n", a2.re, a2.im, b1.re % p, b1.im % p);
                printf("%u %u\n", l, n);
                // exit(1);
                return a2;
            }
        }
    }
    return a2;
}

/* division a/b in GF(p)[i] via multiplication by the conjugate of b;
 * prints intermediates as a debug side effect */
com cdiv(com a, com b) {
    com d, v;
    d.re = (b.re * b.re + b.im * b.im) % p;  /* |b|^2 mod p */
    if (d.re > p) d.re = d.re % p;
    if (d.re < 0) d.re = d.re + p;
    d.im = 0;
    v.re = ((a.re % p) * (b.re % p) + ((a.im % p) * (b.im % p)) % p) % p;
    v.im = ((a.im % p) * (b.re % p)) - (a.re % p) * (b.im % p);
    /* NOTE(review): the normalisations below mix 'a' and 'b' components
     * (e.g. a.re<0 => a.re=b.re+p) exactly as in the original -- they look
     * like copy/paste slips but only affect the debug prints, since the
     * returned value is computed from v and d above; confirm intent. */
    if (a.re > p) a.re = a.re % p;
    if (a.re < 0) a.re = b.re + p;
    if (a.im > p) a.im = b.im % p;
    if (a.im < 0) a.re = a.im + p;
    if (b.re > p) b.re = a.re % p;
    if (b.re < 0) b.re = b.re + p;
    if (b.im > p) b.im = b.im % p;
    if (b.im < 0) b.re = a.im + p;
    printf("re=%lld %lld\n", a.re, b.re);
    printf("imm=%lldi %lldi\n", a.im, b.im);
    //exit(1);
    printf("d=%lld\n", d.re);
    d.re = inv(d.re, p);                 /* 1 / |b|^2 mod p */
    v.re = ((p + v.re) * d.re) % p;
    v.im = ((v.im % p) * d.re) % p;
    if (v.re > p) v.re = v.re % p;
    if (v.im < 0) v.im += p;
    printf("v=%lld %lldi\n", v.re, v.im);
    // exit(1);
    //c.re=d.re;
    //c.im=v.im*inv(d.re,p);
    return v;
}

/* scalar multiple A*a.  NOTE(review): components pass through unsigned int
 * temporaries, truncating negative/large values -- preserved from the
 * original; confirm callers only use small non-negative components. */
com cnst(unsigned int A, com a) {
    unsigned int t, s;
    com r;
    t = A * a.re;
    s = A * a.im;
    r.re = t;
    r.im = s;
    return r;
}

/* chord addition P+Q on a curve with A = 6 (x3 = l^2 - x1 - x2 - A);
 * uses the brute-force cinv, so this is very slow */
PO eadd(PO P, PO Q) {
    PO R = {0};
    com c, d, e, l, A;
    A.re = 6;
    A.im = 0;
    c = csub(P.y, Q.y);   /* y1 - y2 */
    d = csub(P.x, Q.x);   /* x1 - x2 */
    e = cinv(d);
    l = cmul(c, e);       /* slope */
    d = cmul(l, l);
    e = cadd(P.x, Q.x);
    R.x = csub(csub(d, e), A);
    R.y = csub(cmul(l, csub(P.x, R.x)), P.y);
    return R;
}

/* doubling stub.  fixed: the original returned an uninitialised PO
 * (indeterminate values); zero-initialise so the result is deterministic */
PO eadd2(PO P) {
    PO R = {0};
    return R;
}

//E = EllipticCurve(GF(131), [0, 0, 0, 1, 23])
//E.j_invariant()
/* j-invariant of a Montgomery-type curve with coefficient a:
 * j = 256*(a^2-3)^3 / (a^2-4), computed in GF(p)[i]; prints intermediates */
com j_inv(com a) {
    com r, f, h, b1, b2, h1, o, g, q;
    // unsigned int w;
    o.re = 3;   o.im = 0;
    q.re = 256; q.im = 0;
    f.re = 4;   f.im = 0;
    r = cmul(a, a);   /* a^2 */
    //printf("%d %d\n",r.re,r.im);
    //a^2-4
    h = csub(r, f);
    printf("a^2-4: %lld %lld\n", h.re, h.im);
    b1 = cadd(r, f);
    printf("%lld %lld\n", b1.re, b1.im);
    b2 = cmul(r, r);
    h1 = cmul(f, f);
    h1 = cadd(h1, b2);
    printf("%lld %lld\n", h1.re, h1.im);
    /* for p = 131 the curve y^2 = x^3 + x + 23 has j-invariant 78 */
    //g=a^2-3
    g = csub(r, o);
    /* fixed: was %d with long long arguments (UB) */
    printf("a^2-3: %lld %lld\n", g.re, g.im);
    printf("a^2-4: %lld %lld\n", h.re, h.im);
    //g=256*(a^2-3)^3
    //(a^2 - 3)^2 = -4184900860 - 2323531392 I
    //(a^2 - 3)^3 = 228212128828152 - 239983944473728 I
    g = cmul(cmul(cmul(g, g), g), q);
    g.re = g.re % p;
    g.im = g.im % p;
    printf("g=256*(a^2-3)^3: %lld %lld\n", g.re, g.im);
    g = cdiv(g, h);
    if (g.re > p) g.re %= p;
    if (g.re < 0) g.re += p;
    if (g.im > p) g.im = g.im % p;
    if (g.im < 0) g.im += p;
    printf("ans=%lld,%lld\n", g.re, g.im);
    return g;
}

/* real part of a * conj(b): a.re*b.re + a.im*b.im, imaginary part 0 */
com cc(com a, com b) {
    com c;
    c.re = a.re * b.re + a.im * b.im;
    c.im = 0;
    return c;
}

/* Computes the j-invariant of the sample coefficient a = 161 + 208i, then
 * exhaustively scans every a = i + k*I in GF(p)[i] whose j-invariant equals
 * 304 + 364i and counts the matches. */
int main () {
    com a1, a2, r, o, q, f, v, w;
    int s = 31, t = 304, i, k, count = 0;
    s = inv(s, p); //a1
    v.re = s;
    v.im = 0;
    t = inv(t, p); //a2
    w.re = s;      /* NOTE(review): 's' again -- possibly meant 't'; confirm */
    w.im = 0;
    printf("s=%d,t=%d\n", s, t);
    o.re = 3;   o.im = 0;
    q.re = 256; q.im = 0;
    f.re = 4;   f.im = 0;
    //q=cdiv(r,o);
    //printf("%d %d\n",q.re,q.im);
    //exit(1);
    //a=161+208i
    a1.re = 161;
    a1.im = 208;
    j_inv(a1);
    printf("a1======================================\n");
    //exit(1);
    a2.re = 161; //162;
    a2.im = 208; //172;
    a2 = j_inv(a2);
    /* fixed: was %d with long long arguments (UB) */
    printf("j=%lld %lld\n", a2.re, a2.im);
    //exit(1);
    /* exhaustive search for curves with the same j-invariant, 2020-08-04 */
    for (i = 0; i < p; i++) {
        o.re = i;
        for (k = 0; k < p; k++) {
            o.im = k;
            r = j_inv(o);
            //scanf("%d",&n);
            if (r.re == 304 && r.im == 364) {
                printf("(i,k)=%d %d\n", i, k);
                count++;
            }
            /* if(i==161 && k==208){ printf("??\n"); exit(1); } */
        }
    }
    /* fixed: %u for the unsigned global p */
    printf("p=%u count=%d\n", p, count);
    return 0;
}
Physics_CellVal.c
/*
 * SideValues.c
 *
 *  Created on: Jul 27, 2017
 *      Author: abauville
 */

#include "stokes.h"

// Pulls cell-centered values out of the solution vector of sub-equation
// system ISub: interior cells (non-negative numbering) read EqSystem->x
// directly; boundary cells are reconstructed from an interior neighbour's
// value plus the boundary-condition definition.
void Physics_CellVal_retrieveFromSolution (compute* Val, int ISub, Grid* Grid, BC* BC, Numbering* Numbering, EqSystem* EqSystem)
{
    // Where Val is the value to extract from the solution, and DVal the increment since the last time step, IStep is the index of the subsystem of equations
    int I, IBC, INeigh, iy, ix;
    int INumMap0 = Numbering->subEqSystem0Dir[ISub];  // offset of this subsystem in the numbering map
    int iCell;
    compute scale;
#pragma omp parallel for private(iy, ix, I, iCell, IBC, INeigh, scale) OMP_SCHEDULE
    for (iy = 0; iy<Grid->nyEC; iy++) {
        for (ix = 0; ix<Grid->nxEC; ix++) {
            iCell = ix + iy*Grid->nxEC;
            I = Numbering->map[iCell + INumMap0];
            scale = 1.0;//EqSystem->S[InoDir];
            if (I>=0) {
                // interior (non-BC) cell: copy straight from the solution vector
                scale = 1.0;//EqSystem->S[I];
                Val[iCell] = EqSystem->x[I]*scale;
            } else {
                IBC = abs(I)-1; // BC nodes are numbered -1 to -n
                // Get neighbours index
                if (iy==0) { // lower boundary
                    if (Grid->isPeriodic){
                        INeigh = Numbering->map[ ix + (iy+1)*Grid->nxEC + INumMap0 ];
                    } else {
                        if (ix==0) {
                            INeigh = Numbering->map[ ix+1 + (iy+1)*Grid->nxEC + INumMap0 ];
                        } else if (ix==Grid->nxEC-1) {
                            INeigh = Numbering->map[ ix-1 + (iy+1)*Grid->nxEC + INumMap0 ];
                        } else {
                            INeigh = Numbering->map[ ix + (iy+1)*Grid->nxEC + INumMap0 ];
                        }
                    }
                }
                else if (iy==Grid->nyEC-1) { // upper boundary
                    if (Grid->isPeriodic){
                        INeigh = Numbering->map[ ix + (iy-1)*Grid->nxEC + INumMap0 ];
                    } else {
                        if (ix==0) {
                            INeigh = Numbering->map[ ix+1 + (iy-1)*Grid->nxEC + INumMap0];
                        } else if (ix==Grid->nxEC-1) {
                            INeigh = Numbering->map[ ix-1 + (iy-1)*Grid->nxEC + INumMap0 ];
                        } else {
                            INeigh = Numbering->map[ ix + (iy-1)*Grid->nxEC + INumMap0 ];
                        }
                    }
                }
                else if (ix==0) { // left boundary
                    INeigh = Numbering->map[ ix+1 + (iy)*Grid->nxEC + INumMap0 ];
                }
                else if (ix==Grid->nxEC-1) { // right boundary
                    INeigh = Numbering->map[ ix-1 + (iy)*Grid->nxEC + INumMap0 ];
                }
                else {
                    // Warning: Only works for internal Dirichlet
                    INeigh = 1; // Dummy node
                    //printf("Error in Physics_CellVal_retrieveFromSolution, unexpected case");
                    //exit(0);
                }
                // the neighbour may itself be a Dirichlet BC node; in that
                // case take the prescribed value instead of the solution
                compute neighValue;
                if (INeigh<0 && BC->type[abs(INeigh)-1]==Dirichlet) {
                    neighValue = BC->value[abs(INeigh)-1];
                } else {
                    neighValue = EqSystem->x[INeigh];
                }
                scale = 1.0;//EqSystem->S[INeigh];
                //printf("ix, =%i, iy = %i, INeigh =%i, %.1f,BC->type[IBC] = %i\n",ix,iy, INeigh, EqSystem->x[INeigh], BC->type[IBC]);
                //printf("Val[iCell] = %.1f\n",Val[iCell]);
                Val[iCell] = Physics_CellVal_SideValues_getFromBC_Local(neighValue*scale, BC, IBC, ix, iy, Grid);
            }
        }
    }
}

// Reconstructs a single ghost/boundary cell value from the neighbouring
// interior value and the boundary condition IBC.  Exits the program on an
// unknown BC type.
compute Physics_CellVal_SideValues_getFromBC_Local(compute neighValue, BC* BC, int IBC, int ix, int iy, Grid* Grid)
{
    compute sideValue = -1.0;
    // BCtype: BC->type[IBC]
    // sideValue: Val[iCell]
    // neighValue: EqSystem->x[INeigh]*scale
    // BCValue: BC->value[IBC]
    if (BC->type[IBC]==DirichletGhost) { // Dirichlet
        // mirror about the prescribed boundary value
        sideValue = 2.0*BC->value[IBC] - neighValue;
        // printf("IBC %i is Dir Ghost\n",IBC);
    } else if (BC->type[IBC]==NeumannGhost) { // Neumann
        // prescribed gradient: extrapolate using the local cell spacing.
        // NOTE(review): on corner cells both the ix and iy branches can
        // fire, the iy result overwriting the ix one -- confirm intended.
        if (ix==0) {// left or bottom boundary
            sideValue = neighValue - BC->value[IBC]*Grid->DXEC[0];
        } else if (ix==Grid->nxEC-1) {
            sideValue = neighValue + BC->value[IBC]*Grid->DXEC[Grid->nxEC-2];
        }
        if (iy==0) { // right or top boundary
            sideValue = neighValue - BC->value[IBC]*Grid->DYEC[0];
        } else if (iy==Grid->nyEC-1) { // right or top boundary
            sideValue = neighValue + BC->value[IBC]*Grid->DYEC[Grid->nyEC-2];
        }
    } else if (BC->type[IBC]==Dirichlet) {
        sideValue = BC->value[IBC];
    } else if (BC->type[IBC]==Infinity) {
        // linear blend between the neighbour and the far-field value over
        // the distance BC->DeltaL
        if (ix==0) {// left or bottom boundary
            sideValue = neighValue * BC->DeltaL/(BC->DeltaL+Grid->DXEC[0]) + BC->value[IBC] * Grid->DXEC[0]/(BC->DeltaL+Grid->DXEC[0]);
        } else if (ix==Grid->nxEC-1) {
            sideValue = neighValue * BC->DeltaL/(BC->DeltaL+Grid->DXEC[Grid->nxEC-2]) + BC->value[IBC] * Grid->DXEC[Grid->nxEC-2]/(BC->DeltaL+Grid->DXEC[Grid->nxEC-2]);
        }
        if (iy==0) { // right or top boundary
            sideValue = neighValue * BC->DeltaL/(BC->DeltaL+Grid->DYEC[0]) + BC->value[IBC] * Grid->DYEC[0]/(BC->DeltaL+Grid->DYEC[0]);
        } else if (iy==Grid->nyEC-1) { // right or top boundary
            sideValue = neighValue * BC->DeltaL/(BC->DeltaL+Grid->DYEC[Grid->nyEC-2]) + BC->value[IBC] * Grid->DYEC[Grid->nyEC-2]/(BC->DeltaL+Grid->DYEC[Grid->nyEC-2]);
        }
    } else {
        sideValue = 0.0;
        printf("error in Physics_CellVal_retrieveFromSolution: unknown boundary type\n");
        exit(0);
    }
    return sideValue;
}

// Fills the boundary ring of a compute array by copying each boundary cell's
// interior neighbour (periodic wrap-around for the left/right boundaries
// when Grid->isPeriodic).
void Physics_CellVal_SideValues_copyNeighbours_Global(compute* ECValues, Grid* Grid)
{
    // Replace boundary values by their neighbours
    int INeigh, iy, ix, I;
    // lower boundary
    iy = 0;
    for (ix = 0; ix<Grid->nxEC; ix++) {
        I = ix + iy*Grid->nxEC;
        if (Grid->isPeriodic) {
            INeigh = ix + (iy+1)*Grid->nxEC ;
        } else {
            if (ix==0) {
                INeigh = ix+1 + (iy+1)*Grid->nxEC ;
            } else if (ix==Grid->nxEC-1) {
                INeigh = ix-1 + (iy+1)*Grid->nxEC ;
            } else {
                INeigh = ix + (iy+1)*Grid->nxEC ;
            }
        }
        ECValues[I] = ECValues[INeigh];
    }
    // upper boundary
    iy = Grid->nyEC-1;
    for (ix = 0; ix<Grid->nxEC; ix++) {
        I = ix + iy*Grid->nxEC;
        if (Grid->isPeriodic) {
            INeigh = ix + (iy-1)*Grid->nxEC ;
        } else {
            if (ix==0) {
                INeigh = ix+1 + (iy-1)*Grid->nxEC ;
            } else if (ix==Grid->nxEC-1) {
                INeigh = ix-1 + (iy-1)*Grid->nxEC ;
            } else {
                INeigh = ix + (iy-1)*Grid->nxEC ;
            }
        }
        ECValues[I] = ECValues[INeigh];
    }
    if (Grid->isPeriodic) {
        int Iidentical; // index of the identical node
        // left boundary
        // NOTE(review): the copy below is commented out in the original, so
        // the left periodic column is never written here -- confirm whether
        // it is updated elsewhere or this is a latent bug.
        ix = 0;
        for (iy = 1; iy<Grid->nyEC-1; iy++) {
            I = ix + iy*Grid->nxEC;
            Iidentical = Grid->nxEC-2 + (iy)*Grid->nxEC ;
            // ECValues[I] = ECValues[Iidentical];
        }
        // right boundary
        ix = Grid->nxEC-1;
        for (iy = 1; iy<Grid->nyEC-1; iy++) {
            I = ix + iy*Grid->nxEC;
            Iidentical = 1 + (iy)*Grid->nxEC ;
            ECValues[I] = ECValues[Iidentical];
        }
    } else {
        // left boundary
        ix = 0;
        for (iy = 1; iy<Grid->nyEC-1; iy++) {
            I = ix + iy*Grid->nxEC;
            INeigh = ix+1 + (iy)*Grid->nxEC ;
            ECValues[I] = ECValues[INeigh];
        }
        // right boundary
        ix = Grid->nxEC-1;
        for (iy = 1; iy<Grid->nyEC-1; iy++) {
            I = ix + iy*Grid->nxEC;
            INeigh = ix-1 + (iy)*Grid->nxEC ;
            ECValues[I] = ECValues[INeigh];
        }
    }
    //printf("end neighbour stuff");
}

// Integer-array twin of Physics_CellVal_SideValues_copyNeighbours_Global
// (same traversal, int* instead of compute*).
void Physics_CellVal_SideValues_copyNeighbours_Global_i(int* ECValues, Grid* Grid)
{
    // Replace boundary values by their neighbours
    int INeigh, iy, ix, I;
    // lower boundary
    iy = 0;
    for (ix = 0; ix<Grid->nxEC; ix++) {
        I = ix + iy*Grid->nxEC;
        if (Grid->isPeriodic) {
            INeigh = ix + (iy+1)*Grid->nxEC ;
        } else {
            if (ix==0) {
                INeigh = ix+1 + (iy+1)*Grid->nxEC ;
            } else if (ix==Grid->nxEC-1) {
                INeigh = ix-1 + (iy+1)*Grid->nxEC ;
            } else {
                INeigh = ix + (iy+1)*Grid->nxEC ;
            }
        }
        ECValues[I] = ECValues[INeigh];
    }
    // upper boundary
    iy = Grid->nyEC-1;
    for (ix = 0; ix<Grid->nxEC; ix++) {
        I = ix + iy*Grid->nxEC;
        if (Grid->isPeriodic) {
            INeigh = ix + (iy-1)*Grid->nxEC ;
        } else {
            if (ix==0) {
                INeigh = ix+1 + (iy-1)*Grid->nxEC ;
            } else if (ix==Grid->nxEC-1) {
                INeigh = ix-1 + (iy-1)*Grid->nxEC ;
            } else {
                INeigh = ix + (iy-1)*Grid->nxEC ;
            }
        }
        ECValues[I] = ECValues[INeigh];
    }
    if (Grid->isPeriodic) {
        int Iidentical; // index of the identical node
        // left boundary
        // NOTE(review): copy commented out in the original -- see the
        // compute* variant above; confirm intent.
        ix = 0;
        for (iy = 1; iy<Grid->nyEC-1; iy++) {
            I = ix + iy*Grid->nxEC;
            Iidentical = Grid->nxEC-2 + (iy)*Grid->nxEC ;
            // ECValues[I] = ECValues[Iidentical];
        }
        // right boundary
        ix = Grid->nxEC-1;
        for (iy = 1; iy<Grid->nyEC-1; iy++) {
            I = ix + iy*Grid->nxEC;
            Iidentical = 1 + (iy)*Grid->nxEC ;
            ECValues[I] = ECValues[Iidentical];
        }
    } else {
        // left boundary
        ix = 0;
        for (iy = 1; iy<Grid->nyEC-1; iy++) {
            I = ix + iy*Grid->nxEC;
            INeigh = ix+1 + (iy)*Grid->nxEC ;
            ECValues[I] = ECValues[INeigh];
        }
        // right boundary
        ix = Grid->nxEC-1;
        for (iy = 1; iy<Grid->nyEC-1; iy++) {
            I = ix + iy*Grid->nxEC;
            INeigh = ix-1 + (iy)*Grid->nxEC ;
            ECValues[I] = ECValues[INeigh];
        }
    }
}

// Fills the boundary ring by evaluating each boundary cell's BC against the
// value of its interior neighbour (via getFromBC_Local), instead of a plain
// copy.  Periodic left/right columns duplicate their wrap-around twins.
void Physics_CellVal_SideValues_getFromBC_Global(compute* ECValues, Grid* Grid, BC* BC, Numbering* Numbering)
{
    // Replace boundary values by their neighbours
    int INeigh, iy, ix, I;
    int IBC;
    // lower boundary
    iy = 0;
    for (ix = 0; ix<Grid->nxEC; ix++) {
        I = ix + iy*Grid->nxEC;
        if (Grid->isPeriodic) {
            INeigh = ix + (iy+1)*Grid->nxEC ;
        } else {
            if (ix==0) {
                INeigh = ix+1 + (iy+1)*Grid->nxEC ;
            } else if (ix==Grid->nxEC-1) {
                INeigh = ix-1 + (iy+1)*Grid->nxEC ;
            } else {
                INeigh = ix + (iy+1)*Grid->nxEC ;
            }
        }
        IBC = abs(Numbering->map[I])-1; // BC nodes are numbered -1 to -n
        ECValues[I] = Physics_CellVal_SideValues_getFromBC_Local(ECValues[INeigh], BC, IBC, ix, iy, Grid);
    }
    // upper boundary
    iy = Grid->nyEC-1;
    for (ix = 0; ix<Grid->nxEC; ix++) {
        I = ix + iy*Grid->nxEC;
        if (Grid->isPeriodic) {
            INeigh = ix + (iy-1)*Grid->nxEC ;
        } else {
            if (ix==0) {
                INeigh = ix+1 + (iy-1)*Grid->nxEC ;
            } else if (ix==Grid->nxEC-1) {
                INeigh = ix-1 + (iy-1)*Grid->nxEC ;
            } else {
                INeigh = ix + (iy-1)*Grid->nxEC ;
            }
        }
        IBC = abs(Numbering->map[I])-1; // BC nodes are numbered -1 to -n
        ECValues[I] = Physics_CellVal_SideValues_getFromBC_Local(ECValues[INeigh], BC, IBC, ix, iy, Grid);
    }
    if (Grid->isPeriodic) {
        int Iidentical; // index of the identical node
        // left boundary
        // NOTE(review): copy commented out in the original -- confirm.
        ix = 0;
        for (iy = 1; iy<Grid->nyEC-1; iy++) {
            I = ix + iy*Grid->nxEC;
            Iidentical = Grid->nxEC-2 + (iy)*Grid->nxEC ;
            // ECValues[I] = ECValues[Iidentical];
        }
        // right boundary
        ix = Grid->nxEC-1;
        for (iy = 1; iy<Grid->nyEC-1; iy++) {
            I = ix + iy*Grid->nxEC;
            Iidentical = 1 + (iy)*Grid->nxEC ;
            ECValues[I] = ECValues[Iidentical];
        }
    } else {
        // left boundary
        ix = 0;
        for (iy = 1; iy<Grid->nyEC-1; iy++) {
            I = ix + iy*Grid->nxEC;
            INeigh = ix+1 + (iy)*Grid->nxEC ;
            IBC = abs(Numbering->map[I])-1; // BC nodes are numbered -1 to -n
            ECValues[I] = Physics_CellVal_SideValues_getFromBC_Local(ECValues[INeigh], BC, IBC, ix, iy, Grid);
        }
        // right boundary
        ix = Grid->nxEC-1;
        for (iy = 1; iy<Grid->nyEC-1; iy++) {
            I = ix + iy*Grid->nxEC;
            INeigh = ix-1 + (iy)*Grid->nxEC ;
            IBC = abs(Numbering->map[I])-1; // BC nodes are numbered -1 to -n
            ECValues[I] = Physics_CellVal_SideValues_getFromBC_Local(ECValues[INeigh], BC, IBC, ix, iy, Grid);
        }
    }
}

// One explicit Eulerian advection step of cell-centered field A using
// centered one-sided gradients averaged over the two face velocities in
// each direction, then refreshes the boundary ring by neighbour copy.
// NOTE(review): malloc result is not checked, and iVxW/iVxE use
// `iy+Grid->nxVx` where the analogous iVyS/iVyN use `(iy-1)*Grid->nxVy` --
// the `+` looks like it should be `*`; confirm against the Vx layout.
void Physics_CellVal_advectEulerian(compute *A, Model* Model)
{
    Grid* Grid = &(Model->Grid);
    Physics* Physics = &(Model->Physics);
    compute* Anew = (compute*) malloc(Grid->nECTot * sizeof(compute));
    int ix, iy;
    int iC, iN, iS, iW, iE, iVxW, iVxE, iVyS, iVyN;
    compute dAdx_W, dAdx_E, dAdy_S, dAdy_N;
    compute dx = Grid->dx;
    compute dy = Grid->dy;
    compute dt = Physics->dt;
    for (iy = 1; iy < Grid->nyEC-1; ++iy) {
        for (ix = 1; ix < Grid->nxEC-1; ++ix) {
            // Cell indices
            iC = ix + (iy )*Grid->nxEC;
            iN = ix + (iy+1)*Grid->nxEC;
            iS = ix + (iy-1)*Grid->nxEC;
            iW = ix-1 + (iy )*Grid->nxEC;
            iE = ix+1 + (iy )*Grid->nxEC;
            iVxW = ix-1 + iy+Grid->nxVx;
            iVxE = ix + iy+Grid->nxVx;
            iVyS = ix + (iy-1)*Grid->nxVy;
            iVyN = ix + (iy )*Grid->nxVy;
            // one-sided gradients on the four faces
            dAdx_W = (A[iC] - A[iW])/dx;
            dAdx_E = (A[iE] - A[iC])/dx;
            dAdy_S = (A[iC] - A[iS])/dy;
            dAdy_N = (A[iN] - A[iC])/dy;
            Anew[iC] = A[iC] + dt* ( - .5*(Physics->Vx[iVxW]*dAdx_W + Physics->Vx[iVxE]*dAdx_E)
                                     - .5*(Physics->Vy[iVyS]*dAdy_S + Physics->Vy[iVyN]*dAdy_N) );
        }
    }
    // copy the updated interior back into A (two-pass to keep the stencil
    // reading only old values)
    for (iy = 1; iy < Grid->nyEC-1; ++iy) {
        for (ix = 1; ix < Grid->nxEC-1; ++ix) {
            iC = ix + (iy )*Grid->nxEC;
            A[iC] = Anew[iC];
        }
    }
    Physics_CellVal_SideValues_copyNeighbours_Global(A, Grid);
    free(Anew);
}
btheader.h
// a header with OpenMP directive #define PROBLEM_SIZE 1024 static double cuf[PROBLEM_SIZE]; static double q[PROBLEM_SIZE]; static double ue[PROBLEM_SIZE][5]; static double buf[PROBLEM_SIZE][5]; #pragma omp threadprivate(cuf, q, ue, buf)
par_vector.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Member functions for hypre_Vector class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" HYPRE_Int hypre_FillResponseParToVectorAll(void*, HYPRE_Int, HYPRE_Int, void*, MPI_Comm, void**, HYPRE_Int*); /*-------------------------------------------------------------------------- * hypre_ParVectorCreate *--------------------------------------------------------------------------*/ /* If create is called and partitioning is NOT null, then it is assumed that it is array of length 2 containing the start row of the calling processor followed by the start row of the next processor - AHB 6/05 */ hypre_ParVector * hypre_ParVectorCreate( MPI_Comm comm, HYPRE_BigInt global_size, HYPRE_BigInt *partitioning ) { hypre_ParVector *vector; HYPRE_Int num_procs, my_id; if (global_size < 0) { hypre_error_in_arg(2); return NULL; } vector = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST); hypre_MPI_Comm_rank(comm,&my_id); if (!partitioning) { hypre_MPI_Comm_size(comm,&num_procs); hypre_GenerateLocalPartitioning(global_size, num_procs, my_id, &partitioning); } hypre_ParVectorAssumedPartition(vector) = NULL; hypre_ParVectorComm(vector) = comm; hypre_ParVectorGlobalSize(vector) = global_size; hypre_ParVectorFirstIndex(vector) = partitioning[0]; hypre_ParVectorLastIndex(vector) = partitioning[1]-1; hypre_ParVectorPartitioning(vector) = partitioning; hypre_ParVectorLocalVector(vector) = hypre_SeqVectorCreate(partitioning[1] - partitioning[0]); /* set defaults */ hypre_ParVectorOwnsData(vector) = 
1; hypre_ParVectorOwnsPartitioning(vector) = 1; hypre_ParVectorActualLocalSize(vector) = 0; return vector; } /*-------------------------------------------------------------------------- * hypre_ParMultiVectorCreate *--------------------------------------------------------------------------*/ hypre_ParVector * hypre_ParMultiVectorCreate( MPI_Comm comm, HYPRE_BigInt global_size, HYPRE_BigInt *partitioning, HYPRE_Int num_vectors ) { /* note that global_size is the global length of a single vector */ hypre_ParVector *vector = hypre_ParVectorCreate( comm, global_size, partitioning ); hypre_ParVectorNumVectors(vector) = num_vectors; return vector; } /*-------------------------------------------------------------------------- * hypre_ParVectorDestroy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorDestroy( hypre_ParVector *vector ) { if (vector) { if ( hypre_ParVectorOwnsData(vector) ) { hypre_SeqVectorDestroy(hypre_ParVectorLocalVector(vector)); } if ( hypre_ParVectorOwnsPartitioning(vector) ) { hypre_TFree(hypre_ParVectorPartitioning(vector), HYPRE_MEMORY_HOST); } if (hypre_ParVectorAssumedPartition(vector)) { hypre_AssumedPartitionDestroy(hypre_ParVectorAssumedPartition(vector)); } hypre_TFree(vector, HYPRE_MEMORY_HOST); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorInitialize *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorInitialize_v2( hypre_ParVector *vector, HYPRE_MemoryLocation memory_location ) { if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_SeqVectorInitialize_v2(hypre_ParVectorLocalVector(vector), memory_location); hypre_ParVectorActualLocalSize(vector) = hypre_VectorSize(hypre_ParVectorLocalVector(vector)); return hypre_error_flag; } HYPRE_Int hypre_ParVectorInitialize( hypre_ParVector *vector ) { return hypre_ParVectorInitialize_v2(vector, 
hypre_ParVectorMemoryLocation(vector)); } /*-------------------------------------------------------------------------- * hypre_ParVectorSetDataOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorSetDataOwner( hypre_ParVector *vector, HYPRE_Int owns_data ) { if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParVectorOwnsData(vector) = owns_data; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorSetPartitioningOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorSetPartitioningOwner( hypre_ParVector *vector, HYPRE_Int owns_partitioning ) { if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParVectorOwnsPartitioning(vector) = owns_partitioning; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorSetNumVectors * call before calling hypre_ParVectorInitialize * probably this will do more harm than good, use hypre_ParMultiVectorCreate *--------------------------------------------------------------------------*/ #if 0 HYPRE_Int hypre_ParVectorSetNumVectors( hypre_ParVector *vector, HYPRE_Int num_vectors ) { HYPRE_Int ierr=0; hypre_Vector *local_vector = hypre_ParVectorLocalVector(v); hypre_SeqVectorSetNumVectors( local_vector, num_vectors ); return ierr; } #endif /*-------------------------------------------------------------------------- * hypre_ParVectorRead *--------------------------------------------------------------------------*/ hypre_ParVector *hypre_ParVectorRead( MPI_Comm comm, const char *file_name ) { char new_file_name[80]; hypre_ParVector *par_vector; HYPRE_Int my_id, num_procs; HYPRE_BigInt *partitioning; HYPRE_BigInt global_size; HYPRE_Int i; FILE *fp; hypre_MPI_Comm_rank(comm,&my_id); hypre_MPI_Comm_size(comm,&num_procs); partitioning = hypre_CTAlloc(HYPRE_BigInt, 
num_procs+1, HYPRE_MEMORY_HOST); hypre_sprintf(new_file_name,"%s.INFO.%d",file_name,my_id); fp = fopen(new_file_name, "r"); hypre_fscanf(fp, "%b\n", &global_size); for (i=0; i < 2; i++) hypre_fscanf(fp, "%b\n", &partitioning[i]); fclose (fp); par_vector = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST); hypre_ParVectorComm(par_vector) = comm; hypre_ParVectorGlobalSize(par_vector) = global_size; hypre_ParVectorFirstIndex(par_vector) = partitioning[0]; hypre_ParVectorLastIndex(par_vector) = partitioning[1]-1; hypre_ParVectorPartitioning(par_vector) = partitioning; hypre_ParVectorOwnsData(par_vector) = 1; hypre_ParVectorOwnsPartitioning(par_vector) = 1; hypre_sprintf(new_file_name,"%s.%d",file_name,my_id); hypre_ParVectorLocalVector(par_vector) = hypre_SeqVectorRead(new_file_name); /* multivector code not written yet */ hypre_assert( hypre_ParVectorNumVectors(par_vector) == 1 ); return par_vector; } /*-------------------------------------------------------------------------- * hypre_ParVectorPrint *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorPrint( hypre_ParVector *vector, const char *file_name ) { char new_file_name[80]; hypre_Vector *local_vector; MPI_Comm comm; HYPRE_Int my_id, num_procs, i; HYPRE_BigInt *partitioning; HYPRE_BigInt global_size; FILE *fp; if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } local_vector = hypre_ParVectorLocalVector(vector); comm = hypre_ParVectorComm(vector); partitioning = hypre_ParVectorPartitioning(vector); global_size = hypre_ParVectorGlobalSize(vector); hypre_MPI_Comm_rank(comm,&my_id); hypre_MPI_Comm_size(comm,&num_procs); hypre_sprintf(new_file_name,"%s.%d",file_name,my_id); hypre_SeqVectorPrint(local_vector,new_file_name); hypre_sprintf(new_file_name,"%s.INFO.%d",file_name,my_id); fp = fopen(new_file_name, "w"); hypre_fprintf(fp, "%b\n", global_size); for (i=0; i < 2; i++) hypre_fprintf(fp, "%b\n", partitioning[i]); fclose (fp); return 
hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorSetConstantValues *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorSetConstantValues( hypre_ParVector *v, HYPRE_Complex value ) { hypre_Vector *v_local = hypre_ParVectorLocalVector(v); return hypre_SeqVectorSetConstantValues(v_local,value); } /*-------------------------------------------------------------------------- * hypre_ParVectorSetRandomValues *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorSetRandomValues( hypre_ParVector *v, HYPRE_Int seed ) { HYPRE_Int my_id; hypre_Vector *v_local = hypre_ParVectorLocalVector(v); MPI_Comm comm = hypre_ParVectorComm(v); hypre_MPI_Comm_rank(comm,&my_id); seed *= (my_id+1); return hypre_SeqVectorSetRandomValues(v_local, seed); } /*-------------------------------------------------------------------------- * hypre_ParVectorCopy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorCopy( hypre_ParVector *x, hypre_ParVector *y ) { hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); return hypre_SeqVectorCopy(x_local, y_local); } /*-------------------------------------------------------------------------- * hypre_ParVectorCloneShallow * returns a complete copy of a hypre_ParVector x - a shallow copy, re-using * the partitioning and data arrays of x *--------------------------------------------------------------------------*/ hypre_ParVector * hypre_ParVectorCloneShallow( hypre_ParVector *x ) { hypre_ParVector * y = hypre_ParVectorCreate(hypre_ParVectorComm(x), hypre_ParVectorGlobalSize(x), hypre_ParVectorPartitioning(x)); hypre_ParVectorOwnsData(y) = 1; /* ...This vector owns its local vector, although the local vector doesn't * own _its_ data */ hypre_ParVectorOwnsPartitioning(y) = 0; 
hypre_SeqVectorDestroy( hypre_ParVectorLocalVector(y) ); hypre_ParVectorLocalVector(y) = hypre_SeqVectorCloneShallow(hypre_ParVectorLocalVector(x) ); hypre_ParVectorFirstIndex(y) = hypre_ParVectorFirstIndex(x); return y; } hypre_ParVector * hypre_ParVectorCloneDeep_v2( hypre_ParVector *x, HYPRE_MemoryLocation memory_location ) { hypre_ParVector *y = hypre_ParVectorCreate(hypre_ParVectorComm(x), hypre_ParVectorGlobalSize(x), hypre_ParVectorPartitioning(x)); hypre_ParVectorOwnsData(y) = 1; hypre_ParVectorOwnsPartitioning(y) = 0; hypre_SeqVectorDestroy( hypre_ParVectorLocalVector(y) ); hypre_ParVectorLocalVector(y) = hypre_SeqVectorCloneDeep_v2( hypre_ParVectorLocalVector(x), memory_location ); hypre_ParVectorFirstIndex(y) = hypre_ParVectorFirstIndex(x); //RL: WHY HERE? return y; } HYPRE_Int hypre_ParVectorMigrate(hypre_ParVector *x, HYPRE_MemoryLocation memory_location) { if (!x) { return hypre_error_flag; } if ( hypre_GetActualMemLocation(memory_location) != hypre_GetActualMemLocation(hypre_ParVectorMemoryLocation(x)) ) { hypre_Vector *x_local = hypre_SeqVectorCloneDeep_v2(hypre_ParVectorLocalVector(x), memory_location); hypre_SeqVectorDestroy(hypre_ParVectorLocalVector(x)); hypre_ParVectorLocalVector(x) = x_local; } else { hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(x)) = memory_location; } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorScale *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorScale( HYPRE_Complex alpha, hypre_ParVector *y ) { hypre_Vector *y_local = hypre_ParVectorLocalVector(y); return hypre_SeqVectorScale( alpha, y_local); } /*-------------------------------------------------------------------------- * hypre_ParVectorAxpy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorAxpy( HYPRE_Complex alpha, hypre_ParVector *x, hypre_ParVector *y ) { hypre_Vector *x_local 
= hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); return hypre_SeqVectorAxpy( alpha, x_local, y_local); } /*-------------------------------------------------------------------------- * hypre_ParVectorInnerProd *--------------------------------------------------------------------------*/ HYPRE_Real hypre_ParVectorInnerProd( hypre_ParVector *x, hypre_ParVector *y ) { MPI_Comm comm = hypre_ParVectorComm(x); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); HYPRE_Real result = 0.0; HYPRE_Real local_result = hypre_SeqVectorInnerProd(x_local, y_local); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] -= hypre_MPI_Wtime(); #endif hypre_MPI_Allreduce(&local_result, &result, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, comm); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] += hypre_MPI_Wtime(); #endif return result; } /*-------------------------------------------------------------------------- * hypre_ParVectorElmdivpy * y = y + x ./ b [MATLAB Notation] *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorElmdivpy( hypre_ParVector *x, hypre_ParVector *b, hypre_ParVector *y ) { hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *b_local = hypre_ParVectorLocalVector(b); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); return hypre_SeqVectorElmdivpy(x_local, b_local, y_local); } /*-------------------------------------------------------------------------- * hypre_VectorToParVector: * generates a ParVector from a Vector on proc 0 and distributes the pieces * to the other procs in comm *--------------------------------------------------------------------------*/ hypre_ParVector * hypre_VectorToParVector ( MPI_Comm comm, hypre_Vector *v, HYPRE_BigInt *vec_starts ) { HYPRE_BigInt global_size; HYPRE_BigInt *global_vec_starts = NULL; HYPRE_BigInt first_index; HYPRE_BigInt last_index; 
HYPRE_Int local_size; HYPRE_Int num_vectors; HYPRE_Int num_procs, my_id; HYPRE_Int global_vecstride, vecstride, idxstride; hypre_ParVector *par_vector; hypre_Vector *local_vector; HYPRE_Complex *v_data; HYPRE_Complex *local_data; hypre_MPI_Request *requests; hypre_MPI_Status *status, status0; HYPRE_Int i, j, k, p; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (my_id == 0) { global_size = (HYPRE_BigInt)hypre_VectorSize(v); v_data = hypre_VectorData(v); num_vectors = hypre_VectorNumVectors(v); /* for multivectors */ global_vecstride = hypre_VectorVectorStride(v); } hypre_MPI_Bcast(&global_size,1,HYPRE_MPI_INT,0,comm); hypre_MPI_Bcast(&num_vectors,1,HYPRE_MPI_INT,0,comm); hypre_MPI_Bcast(&global_vecstride,1,HYPRE_MPI_INT,0,comm); if ( num_vectors == 1 ) par_vector = hypre_ParVectorCreate(comm, global_size, vec_starts); else par_vector = hypre_ParMultiVectorCreate(comm, global_size, vec_starts, num_vectors); vec_starts = hypre_ParVectorPartitioning(par_vector); first_index = hypre_ParVectorFirstIndex(par_vector); last_index = hypre_ParVectorLastIndex(par_vector); local_size = (HYPRE_Int)(last_index - first_index) + 1; if (my_id == 0) { global_vec_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST); } hypre_MPI_Gather(&first_index, 1, HYPRE_MPI_BIG_INT, global_vec_starts, 1, HYPRE_MPI_BIG_INT, 0, comm); if (my_id == 0) { global_vec_starts[num_procs] = hypre_ParVectorGlobalSize(par_vector); } hypre_ParVectorInitialize(par_vector); local_vector = hypre_ParVectorLocalVector(par_vector); local_data = hypre_VectorData(local_vector); vecstride = hypre_VectorVectorStride(local_vector); idxstride = hypre_VectorIndexStride(local_vector); /* so far the only implemented multivector StorageMethod is 0 */ hypre_assert( idxstride==1 ); if (my_id == 0) { requests = hypre_CTAlloc(hypre_MPI_Request, num_vectors*(num_procs-1), HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_vectors*(num_procs-1), HYPRE_MEMORY_HOST); k = 0; 
for (p = 1; p<num_procs; p++) for (j = 0; j<num_vectors; ++j) { hypre_MPI_Isend( &v_data[(HYPRE_Int) global_vec_starts[p]] + j*global_vecstride, (HYPRE_Int)(global_vec_starts[p+1] - global_vec_starts[p]), HYPRE_MPI_COMPLEX, p, 0, comm, &requests[k++] ); } if (num_vectors == 1) { for (i = 0; i < local_size; i++) local_data[i] = v_data[i]; } else { for (j = 0; j<num_vectors; ++j) { for (i = 0; i < local_size; i++) local_data[i+j*vecstride] = v_data[i+j*global_vecstride]; } } hypre_MPI_Waitall(num_procs-1,requests, status); hypre_TFree(requests, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); } else { for ( j=0; j<num_vectors; ++j ) hypre_MPI_Recv( local_data+j*vecstride, local_size, HYPRE_MPI_COMPLEX, 0, 0, comm,&status0 ); } if (global_vec_starts) { hypre_TFree(global_vec_starts, HYPRE_MEMORY_HOST); } return par_vector; } /*-------------------------------------------------------------------------- * hypre_ParVectorToVectorAll: * generates a Vector on every proc which has a piece of the data * from a ParVector on several procs in comm, * vec_starts needs to contain the partitioning across all procs in comm *--------------------------------------------------------------------------*/ hypre_Vector * hypre_ParVectorToVectorAll( hypre_ParVector *par_v ) { MPI_Comm comm = hypre_ParVectorComm(par_v); HYPRE_BigInt global_size = hypre_ParVectorGlobalSize(par_v); hypre_Vector *local_vector = hypre_ParVectorLocalVector(par_v); HYPRE_Int num_procs, my_id; HYPRE_Int num_vectors = hypre_ParVectorNumVectors(par_v); hypre_Vector *vector; HYPRE_Complex *vector_data; HYPRE_Complex *local_data; HYPRE_Int local_size; hypre_MPI_Request *requests; hypre_MPI_Status *status; HYPRE_Int i, j; HYPRE_Int *used_procs; HYPRE_Int num_types, num_requests; HYPRE_Int vec_len, proc_id; HYPRE_Int *new_vec_starts; HYPRE_Int num_contacts; HYPRE_Int contact_proc_list[1]; HYPRE_Int contact_send_buf[1]; HYPRE_Int contact_send_buf_starts[2]; HYPRE_Int max_response_size; HYPRE_Int 
*response_recv_buf=NULL; HYPRE_Int *response_recv_buf_starts = NULL; hypre_DataExchangeResponse response_obj; hypre_ProcListElements send_proc_obj; HYPRE_Int *send_info = NULL; hypre_MPI_Status status1; HYPRE_Int count, tag1 = 112, tag2 = 223; HYPRE_Int start; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); local_size = (HYPRE_Int)(hypre_ParVectorLastIndex(par_v) - hypre_ParVectorFirstIndex(par_v) + 1); /* determine procs which hold data of par_v and store ids in used_procs */ /* we need to do an exchange data for this. If I own row then I will contact processor 0 with the endpoint of my local range */ if (local_size > 0) { num_contacts = 1; contact_proc_list[0] = 0; contact_send_buf[0] = hypre_ParVectorLastIndex(par_v); contact_send_buf_starts[0] = 0; contact_send_buf_starts[1] = 1; } else { num_contacts = 0; contact_send_buf_starts[0] = 0; contact_send_buf_starts[1] = 0; } /*build the response object*/ /*send_proc_obj will be for saving info from contacts */ send_proc_obj.length = 0; send_proc_obj.storage_length = 10; send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts[0] = 0; send_proc_obj.element_storage_length = 10; send_proc_obj.elements = hypre_CTAlloc(HYPRE_BigInt, send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST); max_response_size = 0; /* each response is null */ response_obj.fill_response = hypre_FillResponseParToVectorAll; response_obj.data1 = NULL; response_obj.data2 = &send_proc_obj; /*this is where we keep info from contacts*/ hypre_DataExchangeList(num_contacts, contact_proc_list, contact_send_buf, contact_send_buf_starts, sizeof(HYPRE_Int), //0, &response_obj, sizeof(HYPRE_Int), &response_obj, max_response_size, 1, comm, (void**) &response_recv_buf, &response_recv_buf_starts); /* now processor 0 should have a list of ranges for processors that 
have rows - these are in send_proc_obj - it needs to create the new list of processors and also an array of vec starts - and send to those who own row*/ if (my_id) { if (local_size) { /* look for a message from processor 0 */ hypre_MPI_Probe(0, tag1, comm, &status1); hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count); send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1); /* now unpack */ num_types = send_info[0]; used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST); new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST); for (i=1; i<= num_types; i++) { used_procs[i-1] = (HYPRE_Int)send_info[i]; } for (i=num_types+1; i< count; i++) { new_vec_starts[i-num_types-1] = send_info[i] ; } } else /* clean up and exit */ { hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST); if(response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); return NULL; } } else /* my_id ==0 */ { num_types = send_proc_obj.length; used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST); new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST); new_vec_starts[0] = 0; for (i=0; i< num_types; i++) { used_procs[i] = send_proc_obj.id[i]; new_vec_starts[i+1] = send_proc_obj.elements[i]+1; } hypre_qsort0(used_procs, 0, num_types-1); hypre_qsort0(new_vec_starts, 0, num_types); /*now we need to put into an array to send */ count = 2*num_types+2; send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); send_info[0] = num_types; for (i=1; i<= num_types; i++) { send_info[i] = (HYPRE_Int)used_procs[i-1]; } for (i=num_types+1; i< count; i++) { send_info[i] = new_vec_starts[i-num_types-1]; } requests = hypre_CTAlloc(hypre_MPI_Request, num_types, 
HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_types, HYPRE_MEMORY_HOST); /* don't send to myself - these are sorted so my id would be first*/ start = 0; if (used_procs[0] == 0) { start = 1; } for (i=start; i < num_types; i++) { hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i], tag1, comm, &requests[i-start]); } hypre_MPI_Waitall(num_types-start, requests, status); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); } /* clean up */ hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST); hypre_TFree(send_info, HYPRE_MEMORY_HOST); if(response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); /* now proc 0 can exit if it has no rows */ if (!local_size) { hypre_TFree(used_procs, HYPRE_MEMORY_HOST); hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST); return NULL; } /* everyone left has rows and knows: new_vec_starts, num_types, and used_procs */ /* this vector should be rather small */ local_data = hypre_VectorData(local_vector); vector = hypre_SeqVectorCreate((HYPRE_Int)global_size); hypre_VectorNumVectors(vector) = num_vectors; hypre_SeqVectorInitialize(vector); vector_data = hypre_VectorData(vector); num_requests = 2*num_types; requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST); /* initialize data exchange among used_procs and generate vector - here we send to ourself also*/ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; vec_len = (HYPRE_Int)(new_vec_starts[i+1] - new_vec_starts[i]); hypre_MPI_Irecv(&vector_data[(HYPRE_Int)new_vec_starts[i]], num_vectors*vec_len, HYPRE_MPI_COMPLEX, proc_id, tag2, comm, &requests[j++]); } for (i = 0; i < num_types; i++) { hypre_MPI_Isend(local_data, 
num_vectors*local_size, HYPRE_MPI_COMPLEX, used_procs[i], tag2, comm, &requests[j++]); } hypre_MPI_Waitall(num_requests, requests, status); if (num_requests) { hypre_TFree(requests, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(used_procs, HYPRE_MEMORY_HOST); } hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST); return vector; } /*-------------------------------------------------------------------------- * hypre_ParVectorPrintIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorPrintIJ( hypre_ParVector *vector, HYPRE_Int base_j, const char *filename ) { MPI_Comm comm; HYPRE_BigInt global_size, j; HYPRE_BigInt *partitioning; HYPRE_Complex *local_data; HYPRE_Int myid, num_procs, i, part0; char new_filename[255]; FILE *file; if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParVectorComm(vector); global_size = hypre_ParVectorGlobalSize(vector); partitioning = hypre_ParVectorPartitioning(vector); /* multivector code not written yet */ hypre_assert( hypre_ParVectorNumVectors(vector) == 1 ); if ( hypre_ParVectorNumVectors(vector) != 1 ) hypre_error_in_arg(1); hypre_MPI_Comm_rank(comm, &myid); hypre_MPI_Comm_size(comm, &num_procs); hypre_sprintf(new_filename,"%s.%05d", filename, myid); if ((file = fopen(new_filename, "w")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n"); return hypre_error_flag; } local_data = hypre_VectorData(hypre_ParVectorLocalVector(vector)); hypre_fprintf(file, "%b \n", global_size); for (i=0; i < 2; i++) { hypre_fprintf(file, "%b ", partitioning[i] + base_j); } hypre_fprintf(file, "\n"); part0 = partitioning[0]; for (j = part0; j < partitioning[1]; j++) { hypre_fprintf(file, "%b %.14e\n", j + base_j, local_data[(HYPRE_Int)(j-part0)]); } fclose(file); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorReadIJ * Warning: wrong base for assumed 
partition if base > 0 *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorReadIJ( MPI_Comm comm, const char *filename, HYPRE_Int *base_j_ptr, hypre_ParVector **vector_ptr ) { HYPRE_BigInt global_size, J; hypre_ParVector *vector; hypre_Vector *local_vector; HYPRE_Complex *local_data; HYPRE_BigInt *partitioning; HYPRE_Int base_j; HYPRE_Int myid, num_procs, i, j; char new_filename[255]; FILE *file; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &myid); hypre_sprintf(new_filename,"%s.%05d", filename, myid); if ((file = fopen(new_filename, "r")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n"); return hypre_error_flag; } hypre_fscanf(file, "%b", &global_size); /* this may need to be changed so that the base is available in the file! */ partitioning = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_fscanf(file, "%b", partitioning); for (i = 0; i < 2; i++) { hypre_fscanf(file, "%b", partitioning+i); } /* This is not yet implemented correctly! 
*/ base_j = 0; vector = hypre_ParVectorCreate(comm, global_size, partitioning); hypre_ParVectorInitialize(vector); local_vector = hypre_ParVectorLocalVector(vector); local_data = hypre_VectorData(local_vector); for (j = 0; j < (HYPRE_Int)(partitioning[1] - partitioning[0]); j++) { hypre_fscanf(file, "%b %le", &J, local_data + j); } fclose(file); *base_j_ptr = base_j; *vector_ptr = vector; /* multivector code not written yet */ hypre_assert( hypre_ParVectorNumVectors(vector) == 1 ); if ( hypre_ParVectorNumVectors(vector) != 1 ) hypre_error(HYPRE_ERROR_GENERIC); return hypre_error_flag; } /*-------------------------------------------------------------------- * hypre_FillResponseParToVectorAll * Fill response function for determining the send processors * data exchange *--------------------------------------------------------------------*/ HYPRE_Int hypre_FillResponseParToVectorAll( void *p_recv_contact_buf, HYPRE_Int contact_size, HYPRE_Int contact_proc, void *ro, MPI_Comm comm, void **p_send_response_buf, HYPRE_Int *response_message_size ) { HYPRE_Int myid; HYPRE_Int i, index, count, elength; HYPRE_BigInt *recv_contact_buf = (HYPRE_BigInt * ) p_recv_contact_buf; hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse*)ro; hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements*)response_obj->data2; hypre_MPI_Comm_rank(comm, &myid ); /*check to see if we need to allocate more space in send_proc_obj for ids*/ if (send_proc_obj->length == send_proc_obj->storage_length) { send_proc_obj->storage_length +=10; /*add space for 10 more processors*/ send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int, send_proc_obj->storage_length, HYPRE_MEMORY_HOST); send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int, send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST); } /*initialize*/ count = send_proc_obj->length; index = send_proc_obj->vec_starts[count]; /*this is the number of elements*/ /*send proc*/ 
send_proc_obj->id[count] = contact_proc; /*do we need more storage for the elements?*/ if (send_proc_obj->element_storage_length < index + contact_size) { elength = hypre_max(contact_size, 10); elength += index; send_proc_obj->elements = hypre_TReAlloc(send_proc_obj->elements, HYPRE_BigInt, elength, HYPRE_MEMORY_HOST); send_proc_obj->element_storage_length = elength; } /*populate send_proc_obj*/ for (i=0; i< contact_size; i++) { send_proc_obj->elements[index++] = recv_contact_buf[i]; } send_proc_obj->vec_starts[count+1] = index; send_proc_obj->length++; /*output - no message to return (confirmation) */ *response_message_size = 0; return hypre_error_flag; } /* ----------------------------------------------------------------------------- * return the sum of all local elements of the vector * ----------------------------------------------------------------------------- */ HYPRE_Complex hypre_ParVectorLocalSumElts( hypre_ParVector * vector ) { return hypre_SeqVectorSumElts( hypre_ParVectorLocalVector(vector) ); } HYPRE_Int hypre_ParVectorGetValuesHost(hypre_ParVector *vector, HYPRE_Int num_values, HYPRE_BigInt *indices, HYPRE_BigInt base, HYPRE_Complex *values) { HYPRE_Int i, ierr = 0; HYPRE_BigInt first_index = hypre_ParVectorFirstIndex(vector); HYPRE_BigInt last_index = hypre_ParVectorLastIndex(vector); hypre_Vector *local_vector = hypre_ParVectorLocalVector(vector); HYPRE_Complex *data = hypre_VectorData(local_vector); /* if (hypre_VectorOwnsData(local_vector) == 0) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Vector does not own data! 
-- hypre_ParVectorGetValues."); return hypre_error_flag; } */ if (indices) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) reduction(+:ierr) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_values; i++) { HYPRE_BigInt index = indices[i] - base; if (index < first_index || index > last_index) { ierr ++; } else { HYPRE_Int local_index = (HYPRE_Int) (index - first_index); values[i] = data[local_index]; } } if (ierr) { hypre_error_in_arg(3); hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Index out of range! -- hypre_ParVectorGetValues."); hypre_printf("Index out of range! -- hypre_ParVectorGetValues\n"); } } else { if (num_values > hypre_VectorSize(local_vector)) { hypre_error_in_arg(2); return hypre_error_flag; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_values; i++) { values[i] = data[i]; } } return hypre_error_flag; } HYPRE_Int hypre_ParVectorGetValues2(hypre_ParVector *vector, HYPRE_Int num_values, HYPRE_BigInt *indices, HYPRE_BigInt base, HYPRE_Complex *values) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (HYPRE_EXEC_DEVICE == hypre_GetExecPolicy1( hypre_ParVectorMemoryLocation(vector) )) { hypre_ParVectorGetValuesDevice(vector, num_values, indices, base, values); } else #endif { hypre_ParVectorGetValuesHost(vector, num_values, indices, base, values); } return hypre_error_flag; } HYPRE_Int hypre_ParVectorGetValues(hypre_ParVector *vector, HYPRE_Int num_values, HYPRE_BigInt *indices, HYPRE_Complex *values) { return hypre_ParVectorGetValues2(vector, num_values, indices, 0, values); }
LAGraph_cc_fastsv2.c
/* LAGraph: graph algorithms based on GraphBLAS Copyright 2019 LAGraph Contributors. (see Contributors.txt for a full list of Contributors; see ContributionInstructions.txt for information on how you can Contribute to this project). All Rights Reserved. NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. Released under a BSD license, please see the LICENSE file distributed with this Software or contact permission@sei.cmu.edu for full terms. Created, in part, with funding and support from the United States Government. (see Acknowledgments.txt file). This program includes and/or can make use of certain third party source code, object code, documentation and other files ("Third Party Software"). See LICENSE file for more details. */ /** * Code is based on the algorithm described in the following paper * Zhang, Azad, Hu. FastSV: FastSV: A Distributed-Memory Connected Component * Algorithm with Fast Convergence (SIAM PP20) * * Modified by Tim Davis, Texas A&M University **/ // The input matrix A must be symmetric. Self-edges (diagonal entries) are // OK, and are ignored. The values and type of A are ignored; just its // pattern is accessed. 
#define LAGRAPH_EXPERIMENTAL_ASK_BEFORE_BENCHMARKING #include "LAGraph.h" static inline void atomic_min_uint64 ( uint64_t *p, // input/output uint64_t value // input ) { uint64_t old, new ; do { // get the old value at (*p) #pragma omp atomic read old = (*p) ; // compute the new minimum new = LAGRAPH_MIN (old, value) ; } while (!__sync_bool_compare_and_swap (p, old, new)) ; } #define LAGRAPH_FREE_ALL //------------------------------------------------------------------------------ // Reduce_assign: w (index) += src //------------------------------------------------------------------------------ // mask = NULL, accumulator = GrB_MIN_UINT64, descriptor = NULL // Duplicates are summed with the accumulator, which differs from how // GrB_assign works. static GrB_Info Reduce_assign ( GrB_Vector w, // vector of size n, all entries present GrB_Vector src, // vector of size n, all entries present GrB_Index *index, // array of size n GrB_Index n, GrB_Index *I, // size n, containing [0, 1, 2, ..., n-1] GrB_Index *mem, int nthreads ) { GrB_Index nw, ns; LAGr_Vector_nvals(&nw, w); LAGr_Vector_nvals(&ns, src); GrB_Index *sval = mem, *wval = sval + nw; LAGr_Vector_extractTuples(NULL, wval, &nw, w); LAGr_Vector_extractTuples(NULL, sval, &ns, src); #if 0 if (nthreads >= 4) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (GrB_Index i = 0; i < n; i++) { atomic_min_uint64 (&(wval [index [i]]), sval [i]) ; // if (sval[i] < wval[index[i]]) // wval[index[i]] = sval[i]; } } else #endif { for (GrB_Index i = 0; i < n; i++) { if (sval[i] < wval[index[i]]) wval[index[i]] = sval[i]; } } LAGr_Vector_clear(w); LAGr_Vector_build(w, I, wval, nw, GrB_PLUS_UINT64); return GrB_SUCCESS; } #undef LAGRAPH_FREE_ALL #define LAGRAPH_FREE_ALL \ { \ LAGRAPH_FREE (I); \ LAGRAPH_FREE (V); \ LAGRAPH_FREE (mem); \ LAGr_free (&f) ; \ LAGr_free (&gp); \ LAGr_free (&mngp); \ LAGr_free (&gp_new); \ LAGr_free (&mod); \ if (sanitize) LAGr_free (&S); \ } 
//------------------------------------------------------------------------------ // LAGraph_cc_fastsv2 //------------------------------------------------------------------------------ GrB_Info LAGraph_cc_fastsv2 ( GrB_Vector *result, // output: array of component identifiers GrB_Matrix A, // input matrix bool sanitize // if true, ensure A is symmetric ) { GrB_Info info; GrB_Index n, *mem = NULL, *I = NULL, *V = NULL ; GrB_Vector f = NULL, gp_new = NULL, mngp = NULL, mod = NULL, gp = NULL ; GrB_Matrix S = NULL ; LAGr_Matrix_nrows (&n, A) ; if (sanitize) { // S = A | A' LAGr_Matrix_new (&S, GrB_BOOL, n, n) ; LAGr_eWiseAdd (S, NULL, NULL, GrB_LOR, A, A, LAGraph_desc_otoo) ; } else { // Use the input as-is, and assume it is symmetric S = A ; } // determine # of threads to use for Reduce_assign int nthreads_max = LAGraph_get_nthreads ( ) ; int nthreads = n / (1024*1024) ; nthreads = LAGRAPH_MIN (nthreads, nthreads_max) ; nthreads = LAGRAPH_MAX (nthreads, 1) ; // vectors LAGr_Vector_new(&f, GrB_UINT64, n); LAGr_Vector_new(&gp_new, GrB_UINT64, n); LAGr_Vector_new(&mod, GrB_BOOL, n); // temporary arrays I = LAGraph_malloc (n, sizeof(GrB_Index)); V = LAGraph_malloc (n, sizeof(uint64_t)) ; mem = (GrB_Index*) LAGraph_malloc (2*n, sizeof(GrB_Index)) ; // prepare vectors for (GrB_Index i = 0; i < n; i++) I[i] = V[i] = i; LAGr_Vector_build (f, I, V, n, GrB_PLUS_UINT64); LAGr_Vector_dup (&gp, f); LAGr_Vector_dup (&mngp,f); // main computation bool diff = true ; while (diff) { // hooking & shortcutting LAGr_mxv (mngp, 0, GrB_MIN_UINT64, GxB_MIN_SECOND_UINT64, S, gp, 0); LAGRAPH_OK (Reduce_assign (f, mngp, V, n, I, mem, nthreads)); LAGr_eWiseMult (f, 0, 0, GrB_MIN_UINT64, f, mngp, 0); LAGr_eWiseMult (f, 0, 0, GrB_MIN_UINT64, f, gp, 0); // calculate grandparent LAGr_Vector_extractTuples (NULL, V, &n, f); LAGr_extract (gp_new, 0, 0, f, V, n, 0); // check termination LAGr_eWiseMult (mod, 0, 0, GrB_NE_UINT64, gp_new, gp, 0); LAGr_reduce (&diff, 0, GxB_LOR_BOOL_MONOID, mod, 0); // swap 
gp and gp_new GrB_Vector t = gp ; gp = gp_new ; gp_new = t ; } // free workspace and return result *result = f; f = NULL ; LAGRAPH_FREE_ALL ; return GrB_SUCCESS; }
6664.c
/* * Compile using the command: * `cc 27Stencil.c -o oa -fopenmp -lm` */ #include <math.h> #include <omp.h> #include <stdint.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #ifdef _OPENACC #include <openacc.h> #endif #define DEFAULT_DATASIZE 1048576 /* Default datasize. */ #define DEFAULT_REPS 10 /* Default repetitions. */ #define CONF95 1.96 #define ITERATIONS 10 #define FAC (1./26) #define TOLERANCE 1.0e-15 extern int reps; /* Repetitions. */ extern double *times; /* Array to store results in. */ extern int flag; /* Flag to set CPU or GPU invocation. */ extern unsigned int datasize; /* Datasize passed to benchmark functions. */ unsigned int datasize = -1; /* Datasize for tests in bytes. */ int reps = -1; /* Repetitions. */ double *times; /* Array of doubles storing the benchmark times in microseconds. */ double testtime; /* The average test time in microseconds for reps runs. */ double testsd; /* The standard deviation in the test time in microseconds for reps runs. */ int flag = 0; /* 0 indicates CPU. */ /* * Function prototypes for common functions. */ void init(int argc, char **argv); void finalisetest(char *); void finalise(void); void benchmark(char *, double (*test)(void)); void print_results(char *, double, double); /* Forward Declarations of utility functions*/ double max_diff(double *, double *, int); void wul(); void usage(char *argv[]) { printf("Usage: %s \n" "\t--reps <repetitions> (default %d)\n" "\t--datasize <datasize> (default %d bytes)\n", argv[0], DEFAULT_REPS, DEFAULT_DATASIZE); } /* * This function parses the parameters from the command line. 
*/ void parse_args(int argc, char *argv[]) { int arg; for (arg = 1; arg < argc; arg++) { if (strcmp(argv[arg], "--reps") == 0) { reps = atoi(argv[++arg]); if (reps == 0) { printf("Invalid integer:--reps: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "--datasize") == 0) { datasize = atoi(argv[++arg]); if (datasize == 0) { printf("Invalid integer:--datasize: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "-h") == 0) { usage(argv); exit(EXIT_SUCCESS); } else { printf("Invalid parameters: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } } void stats(double *mtp, double *sdp) { double meantime, totaltime, sumsq, mintime, maxtime, sd; int i, good_reps; mintime = 1.0e10; maxtime = 0.; totaltime = 0.; good_reps = 0; for (i = 0; i < reps; i++) { /* Skip entries where times is 0, this indicates an error occured */ if (times[i] != 0){ mintime = (mintime < times[i]) ? mintime : times[i]; maxtime = (maxtime > times[i]) ? maxtime : times[i]; totaltime += times[i]; good_reps++; } } meantime = totaltime / good_reps; sumsq = 0; for (i = 0; i < reps; i++) { if (times[i] != 0){ sumsq += (times[i] - meantime) * (times[i] - meantime); } } sd = sqrt(sumsq / good_reps); *mtp = meantime; *sdp = sd; } /* * This function prints the results of the tests. * If you use a compiler which sets a different preprocessor flag * you may wish to add it here. */ void print_results(char *name, double testtime, double testsd) { char compiler[20]; /* Set default compiler idetifier. */ sprintf(compiler, "COMPILER"); /* Set compiler identifier based on known preprocessor flags. */ #ifdef __PGI sprintf(compiler, "PGI"); #endif #ifdef __HMPP sprintf(compiler, "CAPS"); #endif //printf("%s %s %d %f %f\n", compiler, name, datasize, testtime*1e6, CONF95*testsd*1e6); printf("%f\n", testtime*1e6); } /* * This function initialises the storage for the test results and set the defaults. 
*/
void init(int argc, char **argv)
{
  parse_args(argc, argv);

  // apply defaults for anything not set on the command line
  if (reps == -1) {
    reps = DEFAULT_REPS;
  }
  if (datasize == (unsigned int)-1) {
    datasize = DEFAULT_DATASIZE;
  }

  // one timing slot per repetition
  // NOTE(review): malloc result is not checked here -- confirm acceptable
  times = (double *)malloc((reps) * sizeof(double));
  /*
#ifdef __PGI
  acc_init(acc_device_nvidia);
  //  printf("PGI INIT\n");
#endif
#ifdef __HMPP
  int a[5] = {1,2,3,4,5};
#pragma acc data copyin(a[0:5])
  {}
#endif
#ifdef _CRAYC
  int a[5] = {1,2,3,4,5};
#pragma acc data copyin(a[0:5])
  {}
#endif
  */
}

// Release the timing storage allocated by init().
void finalise(void)
{
  free(times);
}

/*
 * This function runs the benchmark specified.
 * test() returns the elapsed time, or the sentinels -10000 (allocation
 * failure) / -11000 (CPU/GPU mismatch); failed runs record a 0 time so that
 * stats() can skip them.
 */
void benchmark(char *name, double (*test)(void))
{
  int i = 0;
  double tmp = 0;
  for (i=0; i<reps; i++) {
    tmp = test();
    if (tmp == -10000){
      printf("Memory allocation failure in %s\n", name);
      times[i] = 0;
    }
    else if (tmp == -11000){
      printf("CPU/GPU mismatch in %s\n", name);
      times[i] = 0;
    }
    else{
      times[i] = tmp;
    }
  }
  stats(&testtime, &testsd);
  //printf("in benchmark\n");
  print_results(name, testtime, testsd);
  //printf("printed result\n");
}

// 27-point stencil benchmark: runs ITERATIONS sweeps of the stencil on the
// host, then the same sweeps under OpenACC/OpenMP pragmas, and returns the
// device-region wall time if the two results agree within TOLERANCE.
// Returns -10000 on allocation failure, -11000 on host/device mismatch.
double stencil()
{
  extern unsigned int datasize;
  // cube edge chosen so two sz^3 double arrays fit in datasize bytes
  // (cbrt result truncated to int)
  int sz = cbrt((datasize/sizeof(double))/2);
  int i, j, k, iter;
  int n = sz-2;                 // interior size; index 0 and sz-1 are halo
  double fac = FAC;             // 1/26: average of the 26 neighbours
  double t1, t2;
  double md;
  //printf("size = %d\n", sz);

  /* Work buffers, with halos */
  double *a0 = (double*)malloc(sizeof(double)*sz*sz*sz);
  double *device_result = (double*)malloc(sizeof(double)*sz*sz*sz);
  double *a1 = (double*)malloc(sizeof(double)*sz*sz*sz);
  double *host_result = (double*)malloc(sizeof(double)*sz*sz*sz);
  double *a0_init = (double*)malloc(sizeof(double)*sz*sz*sz);

  if(a0==NULL||device_result==NULL||a1==NULL||host_result==NULL||a0_init==NULL){
    /* Something went wrong in the memory allocation here, fail gracefully */
    return(-10000);
  }

  /* initialize input array a0 */

  /* zero all of array (including halos) */
  //printf("size = %d\n", sz);
  for (i = 0; i < sz; i++) {
    for (j = 0; j < sz; j++) {
      for (k = 0; k < sz; k++) {
        a0[i*sz*sz+j*sz+k] = 0.0;
        //printf("%d\t", (i*sz*sz+j*sz+k));
      }
    }
  }
  //printf("\n");
  //int size_of_a0 = sizeof(a0) / sizeof(*a0);
  //printf("size of a0 = %d\n", size_of_a0);

  /* use random numbers to fill interior */
  for (i = 1; i < n+1; i++) {
    for (j = 1; j < n+1; j++) {
      for (k = 1; k < n+1; k++) {
        a0[i*sz*sz+j*sz+k] = (double) rand()/ (double)(1.0 + RAND_MAX);
      }
    }
  }

  /* memcpy(&a0_init[0], &a0[0], sizeof(double)*sz*sz*sz); */
  /* save initial input array for later GPU run */
  for (i = 0; i < sz; i++) {
    for (j = 0; j < sz; j++) {
      for (k = 0; k < sz; k++) {
        a0_init[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
      }
    }
  }

  //printf("Host computation\n");
  /* run main computation on host */
  for (iter = 0; iter < ITERATIONS; iter++) {

    // a1 = average of the 26 neighbours of each interior point of a0
    for (i = 1; i < n+1; i++) {
      for (j = 1; j < n+1; j++) {
        for (k = 1; k < n+1; k++) {
          a1[i*sz*sz+j*sz+k] = (
            a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
            a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
            a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
            a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
            a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
            a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
            a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
            a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
            a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
            a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
            a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
            a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
            a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
          ) * fac;
        }
      }
    }

    // copy a1 back into a0 for the next sweep
    for (i = 1; i < n+1; i++) {
      for (j = 1; j < n+1; j++) {
        for (k = 1; k < n+1; k++) {
          a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
        }
      }
    }

  } /* end iteration loop */

  /* save result */
  /* memcpy(&host_result[0], &a0[0], sizeof(double)*sz*sz*sz); */
  for (i = 0; i < sz; i++) {
    for (j = 0; j < sz; j++) {
      for (k = 0; k < sz; k++) {
        host_result[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
        // printf("%lf\t", a0[i*sz*sz+j*sz+k]);
      }
    }
  }
  //int size = sizeof(host_result)/sizeof(host_result[0]);
  //for(i = 0; i < size; i++) {
  //  printf("%lf\t", host_result[i]);
  //}
  //printf("\n");

  /* copy initial array back to a0 */
  /* memcpy(&a0[0], &a0_init[0], sizeof(double)*sz*sz*sz); */
  for (i = 0; i < sz; i++) {
    for (j = 0; j < sz; j++) {
      for (k = 0; k < sz; k++) {
        a0[i*sz*sz+j*sz+k] = a0_init[i*sz*sz+j*sz+k];
      }
    }
  }

  //printf("Starting acc pragma code\n");
  // timed region: same ITERATIONS sweeps under OpenACC data/parallel and
  // OpenMP pragmas
  t1 = omp_get_wtime();

#pragma acc data copy(a0[0:sz*sz*sz]), create(a1[0:sz*sz*sz], i,j,k,iter), copyin(sz,fac,n)
  {
    for (iter = 0; iter < ITERATIONS; iter++) {

      // NOTE(review): this update loop uses OpenMP pragmas while the copy-back
      // loop below uses OpenACC -- confirm this mix is intentional
#pragma omp parallel for
      for (i = 1; i < n+1; i++) {
        for (j = 1; j < n+1; j++) {
#pragma omp simd
          for (k = 1; k < n+1; k++) {
            a1[i*sz*sz+j*sz+k] = (
              a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
              a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
              a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
              a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
              a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
              a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
              a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
              a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
              a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
              a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
              a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
              a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
              a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
            ) * fac;
          }
        }
      }

#pragma acc parallel loop
      for (i = 1; i < n+1; i++) {
#pragma acc loop
        for (j = 1; j < n+1; j++) {
#pragma acc loop
          for (k = 1; k < n+1; k++) {
            a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
          }
        }
      }

    } /* end iteration loop */
  } /* end data region */
#pragma acc wait
  t2 = omp_get_wtime();

  // compare the device result against the saved host result
  memcpy(&device_result[0], &a0[0], sizeof(double)*sz*sz*sz);
  md = max_diff(&host_result[0],&device_result[0], sz);

  /* Free malloc'd memory to prevent leaks */
  free(a0);
  free(a0_init);
  free(a1);
  free(host_result);
  free(device_result);
  //printf("md: %lf \t tolerance: %lf", md, TOLERANCE);
  if (md < TOLERANCE ){
    //printf ("GPU matches host to within tolerance of %1.1e\n\n", TOLERANCE);
    return(t2 - t1);
  }
  else{
    // printf ("WARNING: GPU does not match to within tolerance of %1.1e\nIt is %lf\n", TOLERANCE, md);
    return(-11000);
  }
}

/* Utility Functions */

// Largest absolute element-wise difference between the interiors of two
// sz*sz*sz arrays (halos excluded).
double max_diff(double *array1,double *array2, int sz)
{
  double tmpdiff, diff;
  int i,j,k;
  int n = sz-2;
  diff=0.0;
  for (i = 1; i < n+1; i++) {
    for (j = 1; j < n+1; j++) {
      for (k = 1; k < n+1; k++) {
        tmpdiff = fabs(array1[i*sz*sz+j*sz+k] - array2[i*sz*sz+j*sz+k]);
        //printf("diff: %lf", tmpdiff);
        if (tmpdiff > diff) diff = tmpdiff;
      }
    }
  }
  return diff;
}

/*
 * This function ensures the device is awake.
 * It is more portable than acc_init().
 */
void wul(){
  int data = 8192;
  double *arr_a = (double *)malloc(sizeof(double) * data);
  double *arr_b = (double *)malloc(sizeof(double) * data);
  int i = 0;

  if (arr_a==NULL||arr_b==NULL) {
    printf("Unable to allocate memory in wul.\n");
  }

  for (i=0;i<data;i++){
    arr_a[i] = (double) (rand()/(1.0+RAND_MAX));
  }

  // trivial device kernel just to trigger device initialization
#pragma acc data copy(arr_b[0:data]), copyin(arr_a[0:data])
  {
#pragma acc parallel loop
    for (i=0;i<data;i++){
      arr_b[i] = arr_a[i] * 2;
    }
  }

  if (arr_a[0] < 0){
    printf("Error in WUL\n");
    /*
     * This should never be called as rands should be in the range (0,1].
     * This stops clever optimizers.
     */
  }
  free(arr_a);
  free(arr_b);
}

// Entry point: parse args, wake the device, run the stencil benchmark, and
// print/free results.
int main(int argc, char **argv)
{
  char testName[32];
  //printf("compiler name datasize testtime*1e6 CONF95*testsd*1e6\n");

  /* Initialise storage for test results & parse input arguements. */
  init(argc, argv);

  /* Ensure device is awake. */
  wul();

  sprintf(testName, "27S");
  benchmark(testName, &stencil);

  /* Print results & free results storage */
  finalise();

  return EXIT_SUCCESS;
}
DRB073-doall2-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#include <stdio.h>

/* Two-dimensional array computation using loops:
missing private(j). References to j in the loop cause data races.

Data race pairs (we allow multiple ones to preserve the pattern):
  Write_set = {j@61:10, j@61:20}
  Read_set  = {j@62:20, j@62:12, j61@:14, j61@:20}
Any pair from Write_set vs. Write_set and Write_set vs. Read_set
is a data race pair.

NOTE(review): the "@61/@62" markers reference line numbers in the upstream
DataRaceBench file; verify they still match this copy's numbering.
NOTE(review): this variant carries private(i,j)/private(j) clauses on every
parallel loop, while the header above claims private(j) is missing — confirm
against upstream DRB073-doall2-orig-yes.c before relying on the labelling.
This is an intentional race benchmark: do NOT "fix" the pragmas.
*/

int a[100][100];

int main()
{
  int i,j;

  /* Nested parallelism: the inner "parallel for" opens a second parallel
     region inside every outer iteration (part of the benchmark pattern). */
#pragma omp parallel for private(i ,j )
  for (i=0;i<100;i++)
#pragma omp parallel for private(j )
    for (j=0;j<100;j++)
      a[i][j]=i;

#pragma omp parallel for private(i ,j )
  for (i=0;i<100;i++)
#pragma omp parallel for private(j )
    for (j=0;j<100;j++)
      a[i][j]=a[i][j]+1;

  /* Serial read-back of the final array. */
  for (i=0;i<100;i++)
    for (j=0;j<100;j++)
      printf("%d\n", a[i][j]);

  return 0;
}
GxB_SelectOp_wait.c
//------------------------------------------------------------------------------ // GxB_SelectOp_wait: wait for a user-defined GxB_SelectOp to complete //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // In SuiteSparse:GraphBLAS, a user-defined GxB_SelectOp has no pending // operations to wait for. All this method does is verify that the op is // properly initialized, and then it does an OpenMP flush. #include "GB.h" GrB_Info GxB_SelectOp_wait // no work, just check if the GxB_SelectOp is valid ( GxB_SelectOp *op ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- #pragma omp flush GB_WHERE1 ("GxB_SelectOp_wait (&op)") ; GB_RETURN_IF_NULL (op) ; GB_RETURN_IF_NULL_OR_FAULTY (*op) ; //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- #pragma omp flush return (GrB_SUCCESS) ; }
race.c
#include <stdio.h> #include <omp.h> int main(){ int x = 2; #pragma omp parallel shared(x) { printf("Number of threads: %d\n", omp_get_num_threads()); if (omp_get_thread_num() == 0) { x = 5; } else { /* Print 1: the following read of x has a race */ printf("1: Thread# %d: x = %d\n", omp_get_thread_num(),x ); } #pragma omp barrier if (omp_get_thread_num() == 0) { /* Print 2 */ printf("2: Thread# %d: x = %d\n", omp_get_thread_num(),x ); } else { /* Print 3 */ printf("3: Thread# %d: x = %d\n", omp_get_thread_num(),x ); } } return 0; }
task-taskgroup-unrelated.c
/* * task-taskgroup-unrelated.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run-race | FileCheck %s // REQUIRES: tsan #include <omp.h> #include <stdio.h> #include <unistd.h> #include "ompt/ompt-signal.h" int main(int argc, char *argv[]) { int var = 0, a = 0; #pragma omp parallel num_threads(2) shared(var, a) #pragma omp master { #pragma omp task shared(var, a) { var++; OMPT_SIGNAL(a); // Give master thread time to execute the task in the taskgroup. OMPT_WAIT(a, 2); } #pragma omp taskgroup { #pragma omp task if (0) { // Dummy task. } // Give other threads time to steal the tasks. OMPT_WAIT(a, 1); OMPT_SIGNAL(a); } var++; } int error = (var != 2); fprintf(stderr, "DONE\n"); return error; } // CHECK: WARNING: ThreadSanitizer: data race // CHECK-NEXT: {{(Write|Read)}} of size 4 // CHECK-NEXT: #0 {{.*}}task-taskgroup-unrelated.c:46 // CHECK: Previous write of size 4 // CHECK-NEXT: #0 {{.*}}task-taskgroup-unrelated.c:28 // CHECK: DONE // CHECK: ThreadSanitizer: reported 1 warnings
cast_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: jiejun@openailab.com
 */

/*
 * Tengine CPU reference implementation of the Cast operator: element-wise
 * conversion of a tensor between fp32 and fp16.  The whole kernel is
 * compiled out on macOS (empty #if MACOS branch in run()).
 */

#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include <math.h>
#include "compiler_fp16.h"
#include "cast_param.h"

/* Per-node setup hook: nothing to allocate for Cast. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Per-node teardown hook: nothing to release for Cast. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Pre-run hook: no preparation needed for Cast. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/*
 * Execute the cast: converts input tensor 0 into output tensor 0.
 * Type codes (established by the pointer casts below): 1 = fp32, 2 = fp16.
 * If (type_from, type_to) matches neither (1,2) nor (2,1), the output is
 * left untouched and 0 is still returned (the two ifs are independent).
 */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
#if MACOS
#else
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor;
    struct ir_tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    struct cast_param* cast_param = ( struct cast_param* )ir_node->op.param_mem;
    int type_from = cast_param->type_from;
    int type_to = cast_param->type_to;

    /* Assumes a 4-D NCHW tensor: dims[0..3] are read unconditionally
     * (score() below only claims NCHW nodes) — TODO confirm behavior for
     * tensors with fewer than 4 dims. */
    int channel_num = input_tensor->dims[1];
    int batch_number = input_tensor->dims[0];
    int channel_size = (input_tensor->dims[2]) * (input_tensor->dims[3]);
    int num_thread = exec_graph->num_thread;

    if (type_from == 1 && type_to == 2)
    {
        /* fp32 -> fp16: one channel per OpenMP iteration */
        float* idata = ( float* )input_tensor->data;
        __fp16* odata = ( __fp16* )output_tensor->data;
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < (channel_num * batch_number); i++)
        {
            int offset = i * channel_size;
            for (int j = 0; j < channel_size; j++)
            {
                odata[j + offset] = fp32_to_fp16(idata[j + offset]);
            }
        }
    }
    if (type_from == 2 && type_to == 1)
    {
        /* fp16 -> fp32 */
        __fp16* idata = ( __fp16* )input_tensor->data;
        float* odata = ( float* )output_tensor->data;
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < (channel_num * batch_number); i++)
        {
            int offset = i * channel_size;
            for (int j = 0; j < channel_size; j++)
            {
                odata[j + offset] = fp16_to_fp32(idata[j + offset]);
            }
        }
    }
#endif
    return 0;
}

/*
 * Scheduler callback: this reference kernel bids OPS_SCORE_CANDO for NCHW
 * inputs only; any other layout returns 0 (cannot handle).
 */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    struct ir_node* ir_node = exec_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    if (input_tensor->layout != TENGINE_LAYOUT_NCHW)
        return 0;
    return OPS_SCORE_CANDO;
}

/* Operator vtable wired into the Tengine CPU backend. */
static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

/* Register this implementation for OP_CAST at module load. */
static int reg_cast_hcl_ops(void* arg)
{
    return register_builtin_node_ops(OP_CAST, &hcl_node_ops);
}

/* Unregister at module unload. */
static int unreg_cast_hcl_ops(void* arg)
{
    return unregister_builtin_node_ops(OP_CAST, &hcl_node_ops);
}

AUTO_REGISTER_OPS(reg_cast_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_cast_hcl_ops);
wow_srp_fmt_plug.c
/* * This software was written by Jim Fougeron jfoug AT cox dot net * in 2012. No copyright is claimed, and the software is hereby * placed in the public domain. In case this attempt to disclaim * copyright and place the software in the public domain is deemed * null and void, then the software is Copyright (c) 2012 Jim Fougeron * and it is hereby released to the general public under the following * terms: * * This software may be modified, redistributed, and used for any * purpose, in source and binary forms, with or without modification. * * * This implements the SRP protocol, with Blizzard's (battlenet) documented * implementation specifics. * * U = username in upper case * P = password in upper case * s = random salt value. * * x = SHA1(s . SHA1(U . ":" . P)); * v = 47^x % 112624315653284427036559548610503669920632123929604336254260115573677366691719 * * v is the 'verifier' value (256 bit value). * * Added OMP. Added 'default' oSSL BigNum exponentiation. * GMP exponentation (faster) is optional, and controled with HAVE_LIBGMP in autoconfig.h */ #if FMT_EXTERNS_H extern struct fmt_main fmt_blizzard; #elif FMT_REGISTERS_H john_register_one(&fmt_blizzard); #else #if AC_BUILT /* we need to know if HAVE_LIBGMP is defined */ #include "autoconfig.h" #endif #include <string.h> #include "sha.h" #include "sha2.h" #include "arch.h" #include "params.h" #include "common.h" #include "formats.h" #include "unicode.h" /* For encoding-aware uppercasing */ #ifdef HAVE_LIBGMP #if HAVE_GMP_GMP_H #include "gmp/gmp.h" #else #include "gmp.h" #endif #define EXP_STR " GMP-exp" #else #include <openssl/bn.h> #define EXP_STR " oSSL-exp" #endif #include "johnswap.h" #ifdef _OPENMP #include <omp.h> #define OMP_SCALE 64 #endif #include "memdbg.h" #define FORMAT_LABEL "WoWSRP" #define FORMAT_NAME "Battlenet" #define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR EXP_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define WOWSIG "$WoWSRP$" #define WOWSIGLEN 8 // min plaintext len is 8 
PW's are only alpha-num uppercase #define PLAINTEXT_LENGTH 16 #define CIPHERTEXT_LENGTH 64 #define BINARY_SIZE 4 #define BINARY_ALIGN 4 #define FULL_BINARY_SIZE 32 #define SALT_SIZE (64+3) #define SALT_ALIGN 1 #define USERNAMELEN 32 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 4 // salt is in hex (salt and salt2) static struct fmt_tests tests[] = { {WOWSIG"6D00CD214C8473C7F4E9DC77AE8FC6B3944298C48C7454E6BB8296952DCFE78D$73616C74", "PASSWORD", {"SOLAR"}}, {WOWSIG"A35DCC134159A34F1D411DA7F38AB064B617D5DBDD9258FE2F23D5AB1CF3F685$73616C7432", "PASSWORD2", {"DIZ"}}, {WOWSIG"A35DCC134159A34F1D411DA7F38AB064B617D5DBDD9258FE2F23D5AB1CF3F685$73616C7432*DIZ", "PASSWORD2"}, {NULL} }; #ifdef HAVE_LIBGMP typedef struct t_SRP_CTX { mpz_t z_mod, z_base, z_exp, z_rop; } SRP_CTX; #else typedef struct t_SRP_CTX { BIGNUM *z_mod, *z_base, *z_exp, *z_rop; BN_CTX *BN_ctx; }SRP_CTX; #endif static SRP_CTX *pSRP_CTX; static unsigned char saved_salt[SALT_SIZE]; static unsigned char user_id[USERNAMELEN]; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[8]; static void init(struct fmt_main *self) { int i; #if defined (_OPENMP) int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); pSRP_CTX = mem_calloc_tiny(sizeof(*pSRP_CTX) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); for (i = 0; i < self->params.max_keys_per_crypt; ++i) { #ifdef HAVE_LIBGMP mpz_init_set_str(pSRP_CTX[i].z_mod, "112624315653284427036559548610503669920632123929604336254260115573677366691719", 10); mpz_init_set_str(pSRP_CTX[i].z_base, "47", 10); mpz_init_set_str(pSRP_CTX[i].z_exp, "1", 10); mpz_init(pSRP_CTX[i].z_rop); // Now, properly initialzed mpz_exp, so it is 'large enough' to hold any SHA1 
value // we need to put into it. Then we simply need to copy in the data, and possibly set // the limb count size. mpz_mul_2exp(pSRP_CTX[i].z_exp, pSRP_CTX[i].z_exp, 159); #else pSRP_CTX[i].z_mod=BN_new(); BN_dec2bn(&pSRP_CTX[i].z_mod, "112624315653284427036559548610503669920632123929604336254260115573677366691719"); pSRP_CTX[i].z_base=BN_new(); BN_set_word(pSRP_CTX[i].z_base, 47); pSRP_CTX[i].z_exp=BN_new(); pSRP_CTX[i].z_rop=BN_new(); pSRP_CTX[i].BN_ctx = BN_CTX_new(); #endif } } static int valid(char *ciphertext, struct fmt_main *self) { char *p, *q; if (strncmp(ciphertext, WOWSIG, WOWSIGLEN)) return 0; q = p = &ciphertext[WOWSIGLEN]; while (atoi16[ARCH_INDEX(*q)] != 0x7F) q++; if (q-p != CIPHERTEXT_LENGTH) return 0; if (*q != '$') return 0; ++q; p = strchr(q, '*'); if (!p) return 0; if (((p - q) & 1)) return 0; if (p - q > 2 * SALT_SIZE) return 0; while (atoi16[ARCH_INDEX(*q)] != 0x7F) q++; if (q != p) return 0; if (strlen(&p[1]) > USERNAMELEN) return 0; return 1; } static char *prepare(char *split_fields[10], struct fmt_main *pFmt) { // if user name not there, then add it static char ct[128+32+1]; char *cp; if (!split_fields[1][0] || strncmp(split_fields[1], WOWSIG, WOWSIGLEN)) return split_fields[1]; cp = strchr(split_fields[1], '*'); if (cp) return split_fields[1]; strnzcpy(ct, split_fields[1], 128); cp = &ct[strlen(ct)]; *cp++ = '*'; strnzcpy(cp, split_fields[0], USERNAMELEN); // upcase user name enc_strupper(cp); return ct; } static char *split(char *ciphertext, int index, struct fmt_main *pFmt) { static char ct[128+32+1]; char *cp; strnzcpy(ct, ciphertext, 128+32+1); cp = strchr(ct, '*'); if (cp) *cp = 0; strupr(&ct[WOWSIGLEN]); if (cp) *cp = '*'; return ct; } static void *get_binary(char *ciphertext) { static union { unsigned char b[FULL_BINARY_SIZE]; ARCH_WORD_32 dummy[1]; } out; char *p; int i; p = &ciphertext[WOWSIGLEN]; for (i = 0; i < FULL_BINARY_SIZE; i++) { out.b[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return 
out.b; } static void *salt(char *ciphertext) { static union { unsigned char b[SALT_SIZE]; ARCH_WORD_32 dummy; } out; char *p; int length=0; memset(out.b, 0, SALT_SIZE); p = &ciphertext[WOWSIGLEN+64+1]; while (atoi16[ARCH_INDEX(*p)] != 0x7f) { out.b[++length] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } ++p; out.b[0] = length; memcpy(out.b + length+1, p, strlen(p)+1); return out.b; } static int get_hash_0(int index) { return crypt_out[index][0] & 0xF; } static int get_hash_1(int index) { return crypt_out[index][0] & 0xFF; } static int get_hash_2(int index) { return crypt_out[index][0] & 0xFFF; } static int get_hash_3(int index) { return crypt_out[index][0] & 0xFFFF; } static int get_hash_4(int index) { return crypt_out[index][0] & 0xFFFFF; } static int get_hash_5(int index) { return crypt_out[index][0] & 0xFFFFFF; } static int get_hash_6(int index) { return crypt_out[index][0] & 0x7FFFFFF; } static int salt_hash(void *salt) { unsigned int hash = 0; char *p = (char *)salt; while (*p) { hash <<= 1; hash += (unsigned char)*p++; if (hash >> SALT_HASH_LOG) { hash ^= hash >> SALT_HASH_LOG; hash &= (SALT_HASH_SIZE - 1); } } hash ^= hash >> SALT_HASH_LOG; hash &= (SALT_HASH_SIZE - 1); return hash; } static void set_salt(void *salt) { unsigned char *cp = (unsigned char*)salt; memcpy(saved_salt, &cp[1], *cp); saved_salt[*cp] = 0; strcpy((char*)user_id, (char*)&cp[*cp+1]); } static void set_key(char *key, int index) { strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH+1); enc_strupper(saved_key[index]); } static char *get_key(int index) { return saved_key[index]; } // x = SHA1(s, H(U, ":", P)); // v = 47^x % 112624315653284427036559548610503669920632123929604336254260115573677366691719 static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int j; #ifdef _OPENMP #pragma omp parallel for #endif for (j = 0; j < count; ++j) { SHA_CTX ctx; unsigned char Tmp[20]; SHA1_Init(&ctx); SHA1_Update(&ctx, user_id, strlen((char*)user_id)); 
SHA1_Update(&ctx, ":", 1); SHA1_Update(&ctx, saved_key[j], strlen(saved_key[j])); SHA1_Final(Tmp, &ctx); SHA1_Init(&ctx); SHA1_Update(&ctx, saved_salt, strlen((char*)saved_salt)); SHA1_Update(&ctx, Tmp, 20); SHA1_Final(Tmp, &ctx); // Ok, now Tmp is v #ifdef HAVE_LIBGMP #if 1 // Speed, 17194/s { unsigned char HashStr[80], *p; int i; p = HashStr; for (i = 0; i < 20; ++i) { *p++ = itoa16[Tmp[i]>>4]; *p++ = itoa16[Tmp[i]&0xF]; } *p = 0; mpz_set_str(pSRP_CTX[j].z_exp, (char*)HashStr, 16); mpz_powm (pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod ); mpz_get_str ((char*)HashStr, 16, pSRP_CTX[j].z_rop); p = HashStr; for (i = 0; i < FULL_BINARY_SIZE; i++) { ((unsigned char*)(crypt_out[j]))[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } } #else // Speed, 17445/s { ARCH_WORD_32 *p1, *p2; // This code works for 32 bit (on LE intel systems). I may need to 'fix' it for 64 bit. // GMP is BE format of a huge 'flat' integer. Thus, we need to put into // BE format (each word), and then put the words themselves, into BE order. // memcpy(z_exp->_mp_d, Tmp, 20); p1 = (ARCH_WORD_32*)Tmp; p2 = (ARCH_WORD_32*)pSRP_CTX[j].z_exp->_mp_d; // NOTE z_exp was allocated 'properly' with 2^160 bit size. 
if (!p1[0]) { pSRP_CTX[j].z_exp->_mp_size = 4; p2[3] = JOHNSWAP(p1[1]); p2[2] = JOHNSWAP(p1[2]); p2[1] = JOHNSWAP(p1[3]); p2[0] = JOHNSWAP(p1[4]); } else { pSRP_CTX[j].z_exp->_mp_size = 5; p2[4] = JOHNSWAP(p1[0]); p2[3] = JOHNSWAP(p1[1]); p2[2] = JOHNSWAP(p1[2]); p2[1] = JOHNSWAP(p1[3]); p2[0] = JOHNSWAP(p1[4]); } mpz_powm (pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod ); // memcpy(crypt_out[j], pSRP_CTX[j].z_rop->_mp_d, 32); p1 = (ARCH_WORD_32*)pSRP_CTX[j].z_rop->_mp_d; p2 = (ARCH_WORD_32*)(crypt_out[j]); p2[7] = JOHNSWAP(p1[0]); p2[6] = JOHNSWAP(p1[1]); p2[5] = JOHNSWAP(p1[2]); p2[4] = JOHNSWAP(p1[3]); p2[3] = JOHNSWAP(p1[4]); p2[2] = JOHNSWAP(p1[5]); p2[1] = JOHNSWAP(p1[6]); p2[0] = JOHNSWAP(p1[7]); } #endif #else // using oSSL's BN to do expmod. pSRP_CTX[j].z_exp = BN_bin2bn(Tmp,20,pSRP_CTX[j].z_exp); BN_mod_exp(pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod, pSRP_CTX[j].BN_ctx); BN_bn2bin(pSRP_CTX[j].z_rop, (unsigned char*)(crypt_out[j])); #endif } return count; } static int cmp_all(void *binary, int count) { int i; for (i = 0; i < count; ++i) { if (*((ARCH_WORD_32*)binary) == *((ARCH_WORD_32*)(crypt_out[i]))) return 1; } return 0; } static int cmp_one(void *binary, int index) { return *((ARCH_WORD_32*)binary) == *((ARCH_WORD_32*)(crypt_out[index])); } static int cmp_exact(char *source, int index) { return !memcmp(get_binary(source), crypt_out[index], BINARY_SIZE); } struct fmt_main fmt_blizzard = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP, #if FMT_MAIN_VERSION > 11 { NULL }, #endif tests }, { init, fmt_default_done, fmt_default_reset, prepare, valid, split, get_binary, salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, 
fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 4; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; 
t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
vvadd_I4_I4_I4.c
#include "vvadd_I4_I4_I4.h"

/*
 * Scalar kernel for one output element: *ptr_c = a + b.
 *
 * BUG FIX: renamed from "__operation" — identifiers beginning with a double
 * underscore are reserved for the implementation (C11 7.1.3), so defining
 * one is undefined behavior.
 *
 * Note: signed int32 addition can overflow (undefined behavior); callers
 * are expected to supply in-range data — TODO confirm with the API contract.
 */
static void vvadd_scalar_op( int32_t a, int32_t b, int32_t *ptr_c )
{
    int32_t c;

    c = a + b;
    *ptr_c = c;
}

/*
 * Element-wise addition of two int32 vectors: out[i] = in1[i] + in2[i]
 * for i in [0, nR).  The three buffers must not alias (restrict-qualified).
 *
 * Returns 0 always (kept for API symmetry with sibling kernels; this
 * routine has no failure path).
 */
int vvadd_I4_I4_I4( const int32_t * restrict in1,
                    const int32_t * restrict in2,
                    uint64_t nR,
                    int32_t * restrict out )
{
    int status = 0;

    /* Q_MIN_CHUNK_SIZE_OPENMP is supplied by the project header; a dead
     * commented-out alternative schedule was removed here. */
    #pragma omp parallel for schedule(static, Q_MIN_CHUNK_SIZE_OPENMP)
    for ( uint64_t i = 0; i < nR; i++ )
    {
        int32_t inv1 = in1[i];
        int32_t inv2 = in2[i];
        int32_t outv;

        vvadd_scalar_op(inv1, inv2, &outv);
        out[i] = outv;
    }

    return status;
}
profile.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR OOO FFFFF IIIII L EEEEE % % P P R R O O F I L E % % PPPP RRRR O O FFF I L EEE % % P R R O O F I L E % % P R R OOO F IIIII LLLLL EEEEE % % % % % % MagickCore Image Profile Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/colorspace-private.h" #include "magick/configure.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/hashmap.h" #include "magick/image.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #if defined(MAGICKCORE_LCMS_DELEGATE) #if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H) #include <wchar.h> #include <lcms/lcms2.h> #elif defined(MAGICKCORE_HAVE_LCMS2_H) #include <wchar.h> #include "lcms2.h" #elif defined(MAGICKCORE_HAVE_LCMS_LCMS_H) #include <lcms/lcms.h> #else #include "lcms.h" #endif #endif /* Define declarations. */ #if !defined(LCMS_VERSION) || (LCMS_VERSION < 2000) #define cmsSigCmykData icSigCmykData #define cmsSigGrayData icSigGrayData #define cmsSigLabData icSigLabData #define cmsSigLuvData icSigLuvData #define cmsSigRgbData icSigRgbData #define cmsSigXYZData icSigXYZData #define cmsSigYCbCrData icSigYCbCrData #define cmsSigLinkClass icSigLinkClass #define cmsColorSpaceSignature icColorSpaceSignature #define cmsUInt32Number DWORD #define cmsSetLogErrorHandler(handler) cmsSetErrorHandler(handler) #define cmsCreateTransformTHR(context,source_profile,source_type, \ target_profile,target_type,intent,flags) cmsCreateTransform(source_profile, \ source_type,target_profile,target_type,intent,flags); #define cmsOpenProfileFromMemTHR(context,profile,length) \ cmsOpenProfileFromMem(profile,length) #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e P r o f i l e s % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageProfiles() clones one or more image profiles. % % The format of the CloneImageProfiles method is: % % MagickBooleanType CloneImageProfiles(Image *image, % const Image *clone_image) % % A description of each parameter follows: % % o image: the image. % % o clone_image: the clone image. % */ MagickExport MagickBooleanType CloneImageProfiles(Image *image, const Image *clone_image) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(clone_image != (const Image *) NULL); assert(clone_image->signature == MagickSignature); image->color_profile.length=clone_image->color_profile.length; image->color_profile.info=clone_image->color_profile.info; image->iptc_profile.length=clone_image->iptc_profile.length; image->iptc_profile.info=clone_image->iptc_profile.info; if (clone_image->profiles != (void *) NULL) image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles, (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e l e t e I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DeleteImageProfile() deletes a profile from the image by its name. % % The format of the DeleteImageProfile method is: % % MagickBooleanTyupe DeleteImageProfile(Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. 
% */ MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return(MagickFalse); if (LocaleCompare(name,"icc") == 0) { /* Continue to support deprecated color profile for now. */ image->color_profile.length=0; image->color_profile.info=(unsigned char *) NULL; } if (LocaleCompare(name,"iptc") == 0) { /* Continue to support deprecated IPTC profile for now. */ image->iptc_profile.length=0; image->iptc_profile.info=(unsigned char *) NULL; } return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageProfiles() releases memory associated with an image profile map. % % The format of the DestroyProfiles method is: % % void DestroyImageProfiles(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DestroyImageProfiles(Image *image) { if (image->profiles != (SplayTreeInfo *) NULL) image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageProfile() gets a profile associated with an image by name. % % The format of the GetImageProfile method is: % % const StringInfo *GetImageProfile(const Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. 
% */ MagickExport const StringInfo *GetImageProfile(const Image *image, const char *name) { char key[MaxTextExtent]; const StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((StringInfo *) NULL); (void) CopyMagickString(key,name,MaxTextExtent); profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *) image->profiles,key); return(profile); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t N e x t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNextImageProfile() gets the next profile name for an image. % % The format of the GetNextImageProfile method is: % % char *GetNextImageProfile(const Image *image) % % A description of each parameter follows: % % o hash_info: the hash info. % */ MagickExport char *GetNextImageProfile(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((char *) NULL); return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r o f i l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ProfileImage() associates, applies, or removes an ICM, IPTC, or generic % profile with / to / from an image. If the profile is NULL, it is removed % from the image otherwise added or applied. Use a name of '*' and a profile % of NULL to remove all profiles from the image. 
%
%  ICC and ICM profiles are handled as follows: If the image does not have
%  an associated color profile, the one you provide is associated with the
%  image and the image pixels are not transformed.  Otherwise, the colorspace
%  transform defined by the existing and new profile are applied to the image
%  pixels and the new profile is associated with the image.
%
%  The format of the ProfileImage method is:
%
%      MagickBooleanType ProfileImage(Image *image,const char *name,
%        const void *datum,const size_t length,const MagickBooleanType clone)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
%    o datum: the profile data.
%
%    o length: the length of the profile.
%
%    o clone: should be MagickFalse.
%
*/
#if defined(MAGICKCORE_LCMS_DELEGATE)

/*
  Release the per-thread pixel buffers allocated by AcquirePixelThreadSet.
  NULL slots (left by a partially failed acquire) are skipped; always
  returns NULL.
*/
static unsigned short **DestroyPixelThreadSet(unsigned short **pixels)
{
  register ssize_t
    i;

  assert(pixels != (unsigned short **) NULL);
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    if (pixels[i] != (unsigned short *) NULL)
      pixels[i]=(unsigned short *) RelinquishMagickMemory(pixels[i]);
  pixels=(unsigned short **) RelinquishMagickMemory(pixels);
  return(pixels);
}

/*
  Allocate one scanline buffer of columns*channels unsigned shorts per
  OpenMP thread, so each thread can transform a row without locking.
  Returns NULL on any allocation failure (already-allocated buffers are
  released via DestroyPixelThreadSet).
*/
static unsigned short **AcquirePixelThreadSet(const size_t columns,
  const size_t channels)
{
  register ssize_t
    i;

  unsigned short
    **pixels;

  size_t
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  pixels=(unsigned short **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (unsigned short **) NULL)
    return((unsigned short **) NULL);
  /* Zero the slot array first so a partial failure can be cleaned up. */
  (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(unsigned short *) AcquireQuantumMemory(columns,channels*
      sizeof(**pixels));
    if (pixels[i] == (unsigned short *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}

/*
  Release the per-thread lcms transform handles allocated by
  AcquireTransformThreadSet; always returns NULL.
*/
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
  register ssize_t
    i;

  assert(transform !=
(cmsHTRANSFORM *) NULL); for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++) if (transform[i] != (cmsHTRANSFORM) NULL) cmsDeleteTransform(transform[i]); transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform); return(transform); } static cmsHTRANSFORM *AcquireTransformThreadSet(Image *image, const cmsHPROFILE source_profile,const cmsUInt32Number source_type, const cmsHPROFILE target_profile,const cmsUInt32Number target_type, const int intent,const cmsUInt32Number flags) { cmsHTRANSFORM *transform; register ssize_t i; size_t number_threads; number_threads=GetOpenMPMaximumThreads(); transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads, sizeof(*transform)); if (transform == (cmsHTRANSFORM *) NULL) return((cmsHTRANSFORM *) NULL); (void) ResetMagickMemory(transform,0,number_threads*sizeof(*transform)); for (i=0; i < (ssize_t) number_threads; i++) { transform[i]=cmsCreateTransformTHR(image,source_profile,source_type, target_profile,target_type,intent,flags); if (transform[i] == (cmsHTRANSFORM) NULL) return(DestroyTransformThreadSet(transform)); } return(transform); } #endif static MagickBooleanType SetAdobeRGB1998ImageProfile(Image *image) { static unsigned char AdobeRGB1998Profile[] = { 0x00, 0x00, 0x02, 0x30, 0x41, 0x44, 0x42, 0x45, 0x02, 0x10, 0x00, 0x00, 0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20, 0x07, 0xd0, 0x00, 0x08, 0x00, 0x0b, 0x00, 0x13, 0x00, 0x33, 0x00, 0x3b, 0x61, 0x63, 0x73, 0x70, 0x41, 0x50, 0x50, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x6f, 0x6e, 0x65, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x41, 0x44, 0x42, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x0a, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x00, 0x00, 0x32, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x30, 0x00, 0x00, 0x00, 0x6b, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x01, 0x9c, 0x00, 0x00, 0x00, 0x14, 0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x01, 0xb0, 0x00, 0x00, 0x00, 0x14, 0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x01, 0xc4, 0x00, 0x00, 0x00, 0x0e, 0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x01, 0xd4, 0x00, 0x00, 0x00, 0x0e, 0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x01, 0xe4, 0x00, 0x00, 0x00, 0x0e, 0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x01, 0xf4, 0x00, 0x00, 0x00, 0x14, 0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x02, 0x08, 0x00, 0x00, 0x00, 0x14, 0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x02, 0x1c, 0x00, 0x00, 0x00, 0x14, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x6f, 0x70, 0x79, 0x72, 0x69, 0x67, 0x68, 0x74, 0x20, 0x32, 0x30, 0x30, 0x30, 0x20, 0x41, 0x64, 0x6f, 0x62, 0x65, 0x20, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x73, 0x20, 0x49, 0x6e, 0x63, 0x6f, 0x72, 0x70, 0x6f, 0x72, 0x61, 0x74, 0x65, 0x64, 0x00, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x41, 0x64, 0x6f, 0x62, 0x65, 0x20, 0x52, 0x47, 0x42, 0x20, 0x28, 0x31, 0x39, 0x39, 0x38, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x01, 0x02, 0x33, 0x00, 0x00, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x33, 0x00, 0x00, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x33, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9c, 0x18, 0x00, 0x00, 0x4f, 0xa5, 0x00, 0x00, 0x04, 0xfc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x8d, 0x00, 0x00, 0xa0, 0x2c, 0x00, 0x00, 0x0f, 0x95, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x31, 0x00, 0x00, 0x10, 0x2f, 0x00, 0x00, 0xbe, 0x9c }; StringInfo *profile; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (GetImageProfile(image,"icm") != (const StringInfo *) NULL) return(MagickFalse); profile=AcquireStringInfo(sizeof(AdobeRGB1998Profile)); SetStringInfoDatum(profile,AdobeRGB1998Profile); status=SetImageProfile(image,"icm",profile); profile=DestroyStringInfo(profile); return(status); } static MagickBooleanType SetsRGBImageProfile(Image *image) { static unsigned char sRGBProfile[] = { 0x00, 0x00, 0xee, 0x20, 0x00, 0x00, 0x00, 0x00, 0x04, 0x20, 0x00, 0x00, 0x73, 0x70, 0x61, 0x63, 0x52, 0x47, 0x42, 0x20, 0x4c, 0x61, 0x62, 0x20, 0x07, 0xd7, 0x00, 0x07, 0x00, 0x19, 0x00, 0x00, 0x00, 0x05, 0x00, 0x25, 0x61, 0x63, 0x73, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x34, 0x56, 0x2a, 0xbf, 0x99, 0x4c, 0xcd, 0x06, 0x6d, 0x2c, 0x57, 0x21, 0xd0, 0xd6, 0x8c, 0x5d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x76, 0x41, 0x32, 0x42, 0x30, 0x00, 
0x00, 0x01, 0x68, 0x00, 0x00, 0x74, 0x10, 0x41, 0x32, 0x42, 0x31, 0x00, 0x00, 0x75, 0x78, 0x00, 0x00, 0x01, 0xb4, 0x42, 0x32, 0x41, 0x30, 0x00, 0x00, 0x77, 0x2c, 0x00, 0x00, 0x74, 0x34, 0x42, 0x32, 0x41, 0x31, 0x00, 0x00, 0xeb, 0x60, 0x00, 0x00, 0x01, 0xfc, 0x72, 0x69, 0x67, 0x30, 0x00, 0x00, 0xed, 0x5c, 0x00, 0x00, 0x00, 0x0c, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0xed, 0x68, 0x00, 0x00, 0x00, 0x14, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0xed, 0x7c, 0x00, 0x00, 0x00, 0x76, 0x63, 0x68, 0x61, 0x64, 0x00, 0x00, 0xed, 0xf4, 0x00, 0x00, 0x00, 0x2c, 0x6d, 0x6c, 0x75, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0c, 0x65, 0x6e, 0x55, 0x53, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x73, 0x00, 0x52, 0x00, 0x47, 0x00, 0x42, 0x00, 0x20, 0x00, 0x76, 0x00, 0x34, 0x00, 0x20, 0x00, 0x49, 0x00, 0x43, 0x00, 0x43, 0x00, 0x20, 0x00, 0x70, 0x00, 0x72, 0x00, 0x65, 0x00, 0x66, 0x00, 0x65, 0x00, 0x72, 0x00, 0x65, 0x00, 0x6e, 0x00, 0x63, 0x00, 0x65, 0x00, 0x20, 0x00, 0x70, 0x00, 0x65, 0x00, 0x72, 0x00, 0x63, 0x00, 0x65, 0x00, 0x70, 0x00, 0x74, 0x00, 0x75, 0x00, 0x61, 0x00, 0x6c, 0x00, 0x20, 0x00, 0x69, 0x00, 0x6e, 0x00, 0x74, 0x00, 0x65, 0x00, 0x6e, 0x00, 0x74, 0x00, 0x20, 0x00, 0x62, 0x00, 0x65, 0x00, 0x74, 0x00, 0x61, 0x00, 0x00, 0x6d, 0x41, 0x42, 0x20, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xb0, 0x00, 0x00, 0x73, 0xec, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x07, 0xf7, 0x80, 0x80, 0x80, 0x80, 0x07, 0xb9, 0x84, 0x8b, 0x77, 0x79, 0x08, 0x42, 0x88, 0x52, 0x6e, 0xa3, 0x09, 0x61, 0x8c, 0x4a, 0x65, 0xcf, 0x0c, 0x7a, 0x90, 0x54, 0x5d, 0xda, 0x0e, 0x9b, 0x94, 0x6f, 0x56, 0x76, 0x11, 0x50, 0x98, 0x8f, 0x4f, 0x12, 0x15, 0x38, 0x9c, 0x52, 0x48, 0xda, 0x19, 0x01, 0x9f, 0xe2, 0x42, 0x8a, 0x1b, 0xc9, 0xa2, 0xab, 0x3d, 0xed, 0x1e, 0x44, 0xa4, 0xf7, 0x3a, 0x07, 0x20, 0xea, 0xa6, 0xf5, 0x36, 0x4e, 0x23, 0xb9, 0xa8, 0xc8, 0x32, 0x8e, 0x26, 0x3f, 0xaa, 0x95, 0x2e, 0xb6, 0x28, 0x93, 0xac, 0x8a, 0x2a, 0x00, 0x2c, 0x1a, 0xae, 0x50, 0x25, 0xb4, 0x2f, 0xd0, 0xb0, 0x03, 0x1f, 0xae, 0x09, 0x99, 0x78, 0x06, 0x86, 0x58, 0x0e, 0x30, 0x7a, 0x97, 0x7c, 0x67, 0x0f, 0xe9, 0x7e, 0xdc, 0x73, 0x23, 0x11, 0xc0, 0x83, 0x38, 0x6a, 0x65, 0x13, 0xf7, 0x87, 0x57, 0x61, 0xb0, 0x16, 0xab, 0x8c, 0x2a, 0x59, 0x90, 0x19, 0x4c, 0x90, 0xb3, 0x51, 0x93, 0x1c, 0x1d, 0x94, 0xdc, 0x4a, 0xc6, 0x1f, 0x61, 0x98, 0xb2, 0x44, 0x5d, 0x22, 0xf0, 0x9c, 0x26, 0x3e, 0xe5, 0x26, 0x09, 0x9e, 0xf0, 0x3a, 0xf2, 0x28, 0x93, 0xa1, 0x42, 0x37, 0x50, 0x2a, 0xfa, 0xa3, 0x41, 0x33, 0xc2, 0x2d, 0x94, 0xa5, 0x12, 0x30, 0x4e, 0x30, 0x7a, 0xa6, 0xe2, 0x2c, 0x40, 0x33, 0x7c, 0xa8, 0x97, 0x28, 0x53, 0x36, 0x46, 0xaa, 0x11, 0x24, 0xb9, 0x14, 0xa0, 0x6f, 0xfd, 0x8b, 0x7e, 0x16, 0x12, 0x72, 0x51, 0x82, 0x38, 0x17, 0x8e, 0x75, 0x89, 0x78, 0xb8, 0x19, 0x41, 0x79, 0x8d, 0x6f, 0x58, 0x1b, 0x5b, 0x7e, 0x44, 0x66, 0x03, 0x1d, 0x91, 0x83, 0x28, 0x5d, 0x2e, 0x20, 0x02, 0x88, 0x51, 0x54, 0xe0, 0x22, 0xee, 0x8c, 0xf9, 0x4d, 0x69, 0x25, 0xb5, 0x90, 0xfb, 0x46, 0xfd, 0x28, 
0xd7, 0x94, 0xde, 0x40, 0xde, 0x2b, 0xed, 0x98, 0x02, 0x3c, 0x7c, 0x2e, 0xeb, 0x9a, 0xef, 0x38, 0xa2, 0x31, 0x2d, 0x9d, 0x34, 0x34, 0xc0, 0x34, 0x81, 0xa0, 0x1c, 0x31, 0x6d, 0x37, 0x3c, 0xa2, 0x00, 0x2d, 0xdc, 0x3a, 0x18, 0xa3, 0xaa, 0x2a, 0x60, 0x3c, 0xea, 0xa5, 0x39, 0x26, 0xf8, 0x1e, 0x94, 0x67, 0x75, 0x91, 0x82, 0x1e, 0xa4, 0x69, 0x88, 0x88, 0x21, 0x1e, 0xe3, 0x6b, 0xe5, 0x7e, 0xb3, 0x20, 0x35, 0x70, 0x06, 0x74, 0xad, 0x21, 0xdf, 0x74, 0x1d, 0x6b, 0x22, 0x24, 0x66, 0x79, 0x12, 0x61, 0xdc, 0x27, 0x4c, 0x7f, 0x05, 0x59, 0x16, 0x29, 0x82, 0x84, 0x14, 0x50, 0xb9, 0x2c, 0x47, 0x88, 0x95, 0x49, 0xf7, 0x2e, 0xe9, 0x8c, 0xb6, 0x43, 0xd3, 0x31, 0xca, 0x90, 0x89, 0x3e, 0x96, 0x34, 0xa8, 0x93, 0xc3, 0x3a, 0x2d, 0x36, 0x9e, 0x96, 0x42, 0x35, 0xdd, 0x3a, 0x21, 0x99, 0xb5, 0x32, 0x62, 0x3d, 0xa8, 0x9d, 0x00, 0x2f, 0x16, 0x40, 0xa8, 0x9f, 0x35, 0x2b, 0xbe, 0x43, 0x60, 0xa0, 0xb3, 0x28, 0x71, 0x29, 0xe2, 0x5f, 0x52, 0x97, 0xba, 0x2a, 0x28, 0x60, 0x90, 0x8e, 0xff, 0x27, 0x5c, 0x61, 0x56, 0x84, 0xf9, 0x28, 0x0f, 0x65, 0x30, 0x7b, 0x76, 0x28, 0x8d, 0x69, 0x34, 0x70, 0xfd, 0x2b, 0xb8, 0x6e, 0xb3, 0x67, 0x7e, 0x2e, 0x03, 0x73, 0xfe, 0x5e, 0x2c, 0x30, 0xba, 0x7a, 0x2d, 0x55, 0x63, 0x33, 0x3e, 0x7f, 0xb7, 0x4d, 0x5a, 0x35, 0xcd, 0x84, 0x17, 0x46, 0xe6, 0x37, 0x97, 0x87, 0xee, 0x40, 0x51, 0x39, 0x92, 0x8b, 0x6d, 0x3b, 0x98, 0x3c, 0x0f, 0x8e, 0xd5, 0x37, 0x44, 0x40, 0x6e, 0x93, 0x15, 0x34, 0x12, 0x43, 0x82, 0x96, 0x65, 0x30, 0x77, 0x46, 0x54, 0x99, 0x05, 0x2d, 0x13, 0x49, 0x16, 0x9b, 0x48, 0x29, 0xb2, 0x36, 0x51, 0x56, 0x9d, 0x9f, 0x6b, 0x35, 0x31, 0x57, 0x96, 0x96, 0xa7, 0x33, 0xe1, 0x58, 0xbd, 0x8d, 0xba, 0x31, 0xa5, 0x5a, 0xed, 0x83, 0x91, 0x30, 0xc1, 0x5e, 0x2a, 0x78, 0xa8, 0x32, 0x72, 0x63, 0x12, 0x6d, 0xf8, 0x35, 0x37, 0x68, 0xbf, 0x64, 0x17, 0x38, 0x47, 0x6e, 0xfc, 0x5b, 0x0c, 0x3a, 0x54, 0x74, 0xd4, 0x52, 0x29, 0x3c, 0xd9, 0x7a, 0x56, 0x4a, 0xfa, 0x3e, 0x76, 0x7e, 0xd2, 0x44, 0x73, 0x41, 0xe3, 0x83, 0xf0, 0x3f, 0x20, 0x44, 0x04, 0x87, 0xff, 0x3a, 0x4a, 0x47, 0x70, 0x8c, 0x34, 
0x36, 0x59, 0x49, 0xfc, 0x8f, 0xc0, 0x32, 0x50, 0x4c, 0x75, 0x92, 0xd1, 0x2e, 0x96, 0x4f, 0x1c, 0x95, 0x83, 0x2b, 0x0e, 0x41, 0xa7, 0x4e, 0x2e, 0xa6, 0xe8, 0x41, 0x83, 0x4f, 0x0e, 0x9e, 0xac, 0x3f, 0xbb, 0x4f, 0xb4, 0x95, 0xf3, 0x3d, 0x72, 0x50, 0xa2, 0x8c, 0x7f, 0x39, 0xf4, 0x52, 0x60, 0x81, 0x9d, 0x3a, 0x93, 0x57, 0x0f, 0x76, 0x2f, 0x3c, 0xb3, 0x5d, 0x0e, 0x6a, 0xba, 0x3f, 0x2d, 0x63, 0x34, 0x60, 0xbf, 0x41, 0x19, 0x69, 0x5e, 0x57, 0x0f, 0x43, 0xdd, 0x6f, 0xc6, 0x4f, 0x24, 0x46, 0x36, 0x75, 0x5f, 0x48, 0x32, 0x49, 0x01, 0x7a, 0xd3, 0x42, 0x17, 0x4b, 0xe5, 0x80, 0x21, 0x3d, 0x3b, 0x4e, 0x64, 0x84, 0x86, 0x38, 0xab, 0x50, 0xd7, 0x88, 0x76, 0x34, 0x6a, 0x53, 0x26, 0x8b, 0xf6, 0x30, 0x4d, 0x55, 0xea, 0x8f, 0x33, 0x2c, 0x73, 0x4d, 0xf6, 0x46, 0x11, 0xaf, 0x9c, 0x4c, 0xfd, 0x46, 0x5d, 0xa7, 0x32, 0x4c, 0x05, 0x46, 0xc8, 0x9e, 0xd0, 0x49, 0xa4, 0x47, 0x4d, 0x95, 0xb6, 0x45, 0x8c, 0x47, 0x08, 0x8b, 0xd4, 0x45, 0x18, 0x4b, 0x30, 0x80, 0x86, 0x44, 0xb0, 0x50, 0x4e, 0x73, 0x74, 0x46, 0xb3, 0x56, 0xf7, 0x67, 0x96, 0x48, 0xcf, 0x5d, 0xbd, 0x5c, 0xef, 0x4b, 0x59, 0x64, 0x53, 0x54, 0x0b, 0x4d, 0xd4, 0x6a, 0xb8, 0x4c, 0x81, 0x50, 0x4b, 0x70, 0xf5, 0x45, 0xfc, 0x53, 0x2b, 0x76, 0x96, 0x3f, 0xd8, 0x56, 0x1c, 0x7b, 0xd3, 0x3b, 0x5e, 0x58, 0x72, 0x80, 0x77, 0x36, 0xe7, 0x5a, 0x86, 0x84, 0x38, 0x32, 0x8c, 0x5c, 0xd7, 0x87, 0xc1, 0x2e, 0x56, 0x5b, 0xdb, 0x3e, 0xa8, 0xb6, 0xa9, 0x5c, 0x3f, 0x3f, 0xc7, 0xae, 0xc5, 0x59, 0xbd, 0x3f, 0x54, 0xa7, 0x01, 0x56, 0xf5, 0x3e, 0xa9, 0x9f, 0x11, 0x52, 0x65, 0x3d, 0x76, 0x95, 0x7f, 0x50, 0x92, 0x3f, 0xfc, 0x8a, 0x5d, 0x4e, 0xec, 0x43, 0x42, 0x7d, 0xfe, 0x4f, 0x22, 0x48, 0xdf, 0x70, 0xe6, 0x50, 0x52, 0x51, 0x18, 0x63, 0xbb, 0x53, 0x2a, 0x58, 0x87, 0x59, 0xe2, 0x55, 0xb7, 0x5f, 0x4a, 0x51, 0x37, 0x58, 0x53, 0x65, 0xdb, 0x4a, 0x4b, 0x5a, 0xc6, 0x6c, 0x22, 0x43, 0xc0, 0x5d, 0xfa, 0x72, 0x39, 0x3e, 0x52, 0x60, 0x4f, 0x77, 0x45, 0x39, 0x9d, 0x62, 0x64, 0x7b, 0xf7, 0x35, 0x07, 0x64, 0x4f, 0x80, 0x50, 0x30, 0x6f, 0x69, 0x29, 0x39, 0x52, 0xbc, 0x17, 0x68, 
0x89, 0x39, 0x1d, 0xb5, 0x2f, 0x67, 0xa8, 0x38, 0xcc, 0xae, 0x48, 0x65, 0x09, 0x37, 0x42, 0xa7, 0x28, 0x61, 0x6e, 0x35, 0x0f, 0x9f, 0xc8, 0x5d, 0xc2, 0x35, 0x36, 0x95, 0x38, 0x5b, 0x74, 0x36, 0xf2, 0x89, 0xd0, 0x5a, 0x24, 0x3a, 0xa1, 0x7c, 0x7b, 0x5a, 0x18, 0x42, 0xc8, 0x6e, 0x14, 0x5c, 0x08, 0x4c, 0x7e, 0x60, 0xba, 0x5e, 0x2a, 0x53, 0xe5, 0x57, 0x4b, 0x61, 0x04, 0x5a, 0x9a, 0x4f, 0x60, 0x62, 0xa5, 0x61, 0x02, 0x48, 0x4e, 0x65, 0xb2, 0x67, 0x84, 0x42, 0x4a, 0x68, 0x73, 0x6d, 0xaf, 0x3c, 0xd7, 0x6a, 0x8c, 0x72, 0xe6, 0x37, 0xd2, 0x6c, 0x5c, 0x77, 0x8c, 0x32, 0xdf, 0x76, 0x98, 0x35, 0xc7, 0xc0, 0x60, 0x75, 0xb6, 0x35, 0x08, 0xb9, 0x92, 0x74, 0xa1, 0x34, 0x23, 0xb3, 0x81, 0x72, 0x89, 0x32, 0x62, 0xad, 0x41, 0x6f, 0x12, 0x2e, 0xab, 0xa6, 0xfb, 0x6d, 0x9e, 0x2c, 0xd6, 0xa0, 0x87, 0x68, 0x70, 0x27, 0xbd, 0x96, 0xf7, 0x66, 0x36, 0x2e, 0x4e, 0x88, 0xb0, 0x64, 0x9f, 0x33, 0x3a, 0x7a, 0x39, 0x65, 0xee, 0x3d, 0x4e, 0x6b, 0x73, 0x67, 0x3d, 0x46, 0xc9, 0x5e, 0xaa, 0x69, 0x7e, 0x4e, 0xf6, 0x55, 0x1c, 0x6a, 0xdb, 0x55, 0x5c, 0x4d, 0x23, 0x6d, 0x41, 0x5c, 0x55, 0x46, 0xd5, 0x6f, 0xd8, 0x62, 0xda, 0x40, 0xc7, 0x72, 0xa3, 0x69, 0x2e, 0x3b, 0x63, 0x74, 0xc8, 0x6e, 0xc5, 0x35, 0xe2, 0x82, 0x4d, 0x33, 0x40, 0xc2, 0x49, 0x82, 0x11, 0x32, 0xad, 0xbc, 0x78, 0x81, 0x6d, 0x31, 0x69, 0xb7, 0x34, 0x80, 0x6a, 0x2f, 0xaa, 0xb2, 0x18, 0x7d, 0xd6, 0x2d, 0x34, 0xac, 0x2c, 0x7a, 0xcf, 0x2a, 0xdd, 0xa5, 0x6a, 0x77, 0xd9, 0x28, 0x05, 0x9e, 0x7f, 0x74, 0x2b, 0x25, 0x74, 0x94, 0x3c, 0x71, 0xa5, 0x27, 0x0a, 0x87, 0x7e, 0x70, 0xdb, 0x2f, 0x37, 0x78, 0x00, 0x71, 0x2b, 0x38, 0x14, 0x68, 0xb5, 0x72, 0x5c, 0x42, 0x16, 0x5c, 0x86, 0x73, 0xe1, 0x49, 0xfb, 0x52, 0xd4, 0x76, 0x24, 0x51, 0xba, 0x4b, 0xcd, 0x78, 0x6b, 0x58, 0xc3, 0x45, 0xb3, 0x7a, 0xcc, 0x5f, 0x68, 0x3f, 0xce, 0x7c, 0xbe, 0x65, 0x5c, 0x39, 0xcb, 0x8c, 0x5f, 0x31, 0xaa, 0xc3, 0xa1, 0x8c, 0x4f, 0x31, 0x69, 0xbe, 0x08, 0x8b, 0xc6, 0x30, 0x09, 0xb9, 0x36, 0x89, 0x9e, 0x2c, 0x91, 0xb4, 0xbb, 0x89, 0x65, 0x2b, 0xf4, 0xaf, 0xe0, 0x86, 0x7a, 0x29, 0x70, 
0xa9, 0x9b, 0x82, 0xa8, 0x25, 0x8f, 0xa3, 0x61, 0x80, 0x8d, 0x24, 0xe3, 0x9b, 0x42, 0x7d, 0x4b, 0x23, 0x46, 0x91, 0x15, 0x7b, 0x7f, 0x25, 0x1b, 0x84, 0x29, 0x78, 0xfa, 0x26, 0xbc, 0x74, 0xd7, 0x7a, 0x0d, 0x32, 0x6c, 0x65, 0x6c, 0x79, 0x72, 0x3b, 0xc2, 0x58, 0x72, 0x7c, 0xbd, 0x45, 0xc7, 0x50, 0x81, 0x7e, 0xe0, 0x4d, 0x48, 0x49, 0xab, 0x82, 0xc7, 0x55, 0x66, 0x44, 0x65, 0x84, 0xde, 0x5c, 0x08, 0x3d, 0xeb, 0x94, 0xfd, 0x30, 0x41, 0xc4, 0xc4, 0x95, 0x28, 0x30, 0x45, 0xbf, 0x49, 0x94, 0xeb, 0x2f, 0x4e, 0xba, 0xba, 0x94, 0x83, 0x2e, 0x1b, 0xb6, 0x43, 0x93, 0xeb, 0x2c, 0xad, 0xb1, 0xcb, 0x92, 0x90, 0x2b, 0x21, 0xac, 0x81, 0x90, 0x64, 0x29, 0x1f, 0xa6, 0x8f, 0x8e, 0x89, 0x27, 0xe7, 0xa0, 0x73, 0x8b, 0x2a, 0x25, 0xcb, 0x97, 0x97, 0x87, 0x3b, 0x23, 0x09, 0x8d, 0xa5, 0x86, 0xde, 0x26, 0x61, 0x81, 0x16, 0x85, 0xb1, 0x28, 0x7c, 0x72, 0xa5, 0x84, 0xc3, 0x31, 0xfd, 0x63, 0xe5, 0x87, 0x22, 0x3d, 0x36, 0x59, 0x39, 0x89, 0x4b, 0x45, 0xcf, 0x51, 0x22, 0x8b, 0x83, 0x4c, 0xd7, 0x4a, 0x3f, 0x8d, 0x55, 0x53, 0x38, 0x43, 0x82, 0x9c, 0xec, 0x2e, 0x8f, 0xc6, 0x17, 0x9d, 0x65, 0x2e, 0xf3, 0xc0, 0x8e, 0x9d, 0x5c, 0x2e, 0x63, 0xbc, 0x0d, 0x9d, 0x1f, 0x2d, 0x95, 0xb7, 0xaf, 0x9c, 0xcf, 0x2c, 0xaa, 0xb3, 0x4f, 0x9c, 0x29, 0x2b, 0x97, 0xae, 0xad, 0x9a, 0x6a, 0x2a, 0x22, 0xa8, 0xef, 0x98, 0x9c, 0x28, 0x95, 0xa3, 0x35, 0x96, 0xae, 0x27, 0x3d, 0x9c, 0x92, 0x94, 0xbe, 0x26, 0x82, 0x94, 0x0d, 0x93, 0x20, 0x26, 0xd8, 0x89, 0xfa, 0x92, 0x49, 0x28, 0xed, 0x7e, 0x0f, 0x91, 0x2b, 0x2b, 0xc1, 0x70, 0x7d, 0x91, 0x66, 0x35, 0x27, 0x63, 0xeb, 0x92, 0xcc, 0x3e, 0x0c, 0x59, 0x67, 0x93, 0xa1, 0x45, 0x06, 0x50, 0xca, 0x95, 0x88, 0x4b, 0x97, 0x49, 0x27, 0xa4, 0xaa, 0x2c, 0x45, 0xc7, 0x7c, 0xa5, 0x3c, 0x2c, 0xde, 0xc1, 0xe9, 0xa5, 0x4a, 0x2c, 0x51, 0xbd, 0x54, 0xa5, 0x21, 0x2b, 0x86, 0xb8, 0xfc, 0xa4, 0xe4, 0x2a, 0x94, 0xb4, 0xb2, 0xa4, 0x9c, 0x29, 0xa0, 0xb0, 0x62, 0xa3, 0xa7, 0x28, 0xd5, 0xab, 0x19, 0xa2, 0x6b, 0x27, 0xf4, 0xa5, 0xaf, 0xa0, 0xdf, 0x26, 0xfc, 0xa0, 0x39, 0x9f, 0x3a, 0x26, 0xcf, 0x98, 0xa1, 0x9d, 
0x86, 0x26, 0x2e, 0x90, 0xc1, 0x9c, 0x76, 0x27, 0xb9, 0x86, 0x76, 0x9b, 0xe6, 0x2a, 0x97, 0x7b, 0x2d, 0x9b, 0x77, 0x2f, 0x5d, 0x6f, 0x0d, 0x9b, 0xb5, 0x36, 0xb3, 0x63, 0x1a, 0x9c, 0x83, 0x3e, 0x53, 0x58, 0xdd, 0x9d, 0x4f, 0x44, 0x68, 0x50, 0x24, 0xad, 0x53, 0x29, 0xae, 0xc8, 0xbb, 0xad, 0xaa, 0x2a, 0x17, 0xc3, 0x3e, 0xad, 0x8c, 0x29, 0x8a, 0xbe, 0x8b, 0xad, 0x85, 0x28, 0x3f, 0xba, 0x5a, 0xad, 0x69, 0x27, 0x20, 0xb6, 0x02, 0xad, 0x69, 0x25, 0xb6, 0xb1, 0xcf, 0xad, 0x3f, 0x24, 0xbd, 0xad, 0x01, 0xad, 0x2e, 0x23, 0xa6, 0xa7, 0xd1, 0xad, 0x0a, 0x22, 0x8a, 0xa2, 0xba, 0xac, 0x5d, 0x22, 0x20, 0x9c, 0xac, 0xaa, 0xe1, 0x22, 0x87, 0x95, 0x1b, 0xa8, 0x9e, 0x23, 0x62, 0x8d, 0x14, 0xa6, 0x68, 0x25, 0xe6, 0x83, 0x24, 0xa5, 0x34, 0x2a, 0x3e, 0x78, 0xb7, 0xa5, 0x30, 0x2f, 0x4f, 0x6e, 0x51, 0xa5, 0x2b, 0x36, 0x11, 0x62, 0xcc, 0xa5, 0x35, 0x3d, 0x7b, 0x57, 0xfb, 0x08, 0x6b, 0x87, 0x9b, 0x85, 0x31, 0x0b, 0xb6, 0x8a, 0xea, 0x7b, 0x53, 0x0d, 0x77, 0x8d, 0xea, 0x72, 0xc3, 0x0f, 0xc9, 0x91, 0x52, 0x6a, 0x49, 0x11, 0xce, 0x94, 0xba, 0x61, 0xe9, 0x14, 0x20, 0x98, 0xae, 0x5a, 0x3f, 0x16, 0xe0, 0x9c, 0x78, 0x52, 0x6c, 0x1a, 0x29, 0xa0, 0x27, 0x4b, 0xac, 0x1c, 0xfe, 0xa3, 0x54, 0x45, 0xd4, 0x1f, 0xae, 0xa6, 0x0c, 0x40, 0x50, 0x22, 0x31, 0xa8, 0x42, 0x3c, 0x4e, 0x24, 0xfa, 0xaa, 0x15, 0x38, 0x6f, 0x27, 0xc8, 0xab, 0xca, 0x34, 0xac, 0x2a, 0xc8, 0xad, 0x4c, 0x31, 0x11, 0x2d, 0xd2, 0xaf, 0x01, 0x2c, 0xc5, 0x30, 0xf0, 0xb0, 0xa7, 0x28, 0x25, 0x33, 0xbf, 0xb1, 0xee, 0x24, 0xa8, 0x12, 0xcb, 0x7e, 0xef, 0x89, 0xc4, 0x16, 0x0d, 0x80, 0x80, 0x80, 0x80, 0x16, 0xc7, 0x84, 0x95, 0x77, 0x6f, 0x18, 0x23, 0x88, 0x43, 0x6e, 0x78, 0x1a, 0x45, 0x8c, 0x52, 0x65, 0x8d, 0x1c, 0xab, 0x90, 0x90, 0x5c, 0xfe, 0x1e, 0x84, 0x94, 0xac, 0x55, 0x04, 0x21, 0x1b, 0x98, 0x92, 0x4d, 0x9e, 0x24, 0x72, 0x9c, 0x49, 0x47, 0x4f, 0x28, 0x02, 0x9f, 0xe1, 0x40, 0xca, 0x2a, 0x4f, 0xa2, 0x45, 0x3c, 0xdf, 0x2c, 0xb1, 0xa4, 0x5d, 0x39, 0x3e, 0x2f, 0x36, 0xa6, 0x3e, 0x35, 0xb9, 0x31, 0xf4, 0xa7, 0xf3, 0x32, 0x47, 0x34, 0xe0, 0xa9, 0x8f, 
0x2e, 0xa6, 0x37, 0xcb, 0xab, 0x0d, 0x2a, 0xea, 0x3a, 0xa5, 0xac, 0x5e, 0x27, 0x75, 0x1b, 0x59, 0x77, 0x11, 0x8e, 0x94, 0x1d, 0x20, 0x78, 0xe1, 0x85, 0xa4, 0x1e, 0xf2, 0x7b, 0x36, 0x7c, 0xb7, 0x20, 0xc7, 0x7f, 0x0a, 0x73, 0x5f, 0x22, 0x53, 0x83, 0x4e, 0x6a, 0x1f, 0x24, 0x10, 0x87, 0x8b, 0x60, 0xf1, 0x26, 0x4b, 0x8c, 0x52, 0x58, 0x95, 0x28, 0x8e, 0x90, 0x97, 0x50, 0x66, 0x2b, 0x30, 0x94, 0x8e, 0x4a, 0x1c, 0x2e, 0x22, 0x98, 0x68, 0x43, 0x98, 0x31, 0x30, 0x9b, 0xbe, 0x3e, 0x41, 0x34, 0x0c, 0x9e, 0x87, 0x3a, 0x63, 0x36, 0x99, 0xa0, 0xf2, 0x36, 0xcd, 0x39, 0x1a, 0xa2, 0xfa, 0x33, 0x59, 0x3b, 0xc6, 0xa4, 0xcf, 0x2f, 0xe8, 0x3e, 0x94, 0xa6, 0x48, 0x2c, 0x91, 0x41, 0x41, 0xa7, 0x95, 0x29, 0x49, 0x25, 0x6d, 0x6f, 0x0e, 0x94, 0x00, 0x26, 0xc3, 0x70, 0xbf, 0x8b, 0x1e, 0x27, 0xc4, 0x72, 0xae, 0x82, 0x5b, 0x28, 0xd2, 0x75, 0xc0, 0x78, 0xfc, 0x2a, 0x2d, 0x79, 0x6a, 0x6f, 0x84, 0x2c, 0x48, 0x7e, 0x54, 0x66, 0x0b, 0x2e, 0x10, 0x83, 0x32, 0x5c, 0xec, 0x2f, 0xd7, 0x87, 0xe9, 0x54, 0x72, 0x32, 0x5d, 0x8c, 0x38, 0x4d, 0x36, 0x35, 0x3b, 0x90, 0x79, 0x47, 0x1c, 0x37, 0xe1, 0x94, 0x68, 0x40, 0x8c, 0x3a, 0x8b, 0x97, 0x9c, 0x3c, 0x38, 0x3d, 0x33, 0x9a, 0xa8, 0x38, 0x46, 0x3f, 0xf0, 0x9d, 0xb9, 0x34, 0x88, 0x42, 0xa6, 0xa0, 0x36, 0x31, 0x00, 0x45, 0x2f, 0xa1, 0xac, 0x2d, 0xba, 0x47, 0xbd, 0xa3, 0x0b, 0x2a, 0x6d, 0x2f, 0xb6, 0x66, 0xef, 0x9a, 0x3a, 0x31, 0x11, 0x67, 0xf0, 0x91, 0xa9, 0x31, 0x74, 0x69, 0xde, 0x88, 0xb3, 0x32, 0x40, 0x6c, 0x12, 0x7f, 0xba, 0x33, 0x73, 0x70, 0x0a, 0x75, 0x59, 0x34, 0x65, 0x74, 0x0d, 0x6b, 0xd4, 0x36, 0x1b, 0x78, 0xd3, 0x62, 0x72, 0x38, 0x58, 0x7e, 0x58, 0x59, 0x92, 0x3a, 0x34, 0x83, 0x33, 0x51, 0x32, 0x3c, 0x84, 0x87, 0xb8, 0x4a, 0x9a, 0x3f, 0x36, 0x8c, 0x52, 0x44, 0x0a, 0x41, 0xee, 0x90, 0x63, 0x3e, 0x79, 0x44, 0x21, 0x93, 0x89, 0x3a, 0x64, 0x46, 0x77, 0x96, 0x8b, 0x36, 0x7a, 0x48, 0xe7, 0x99, 0x66, 0x32, 0xae, 0x4b, 0x71, 0x9b, 0xf4, 0x2f, 0x10, 0x4e, 0x27, 0x9e, 0x12, 0x2b, 0xb8, 0x3c, 0xb2, 0x5e, 0x60, 0xa1, 0x0c, 0x3d, 0x96, 0x5f, 0x9b, 0x98, 0xc8, 0x3e, 
0x74, 0x60, 0xd3, 0x90, 0xc3, 0x3d, 0x4d, 0x62, 0xb8, 0x87, 0x2e, 0x3c, 0xca, 0x65, 0x41, 0x7d, 0x52, 0x3d, 0x67, 0x69, 0x8b, 0x72, 0x65, 0x3f, 0x35, 0x6e, 0x91, 0x68, 0xa8, 0x40, 0x8a, 0x73, 0x65, 0x5f, 0x4a, 0x42, 0x59, 0x79, 0x03, 0x56, 0x9a, 0x44, 0xba, 0x7e, 0x3c, 0x4e, 0xc2, 0x46, 0xc6, 0x83, 0x1c, 0x48, 0x75, 0x48, 0xe9, 0x87, 0xae, 0x41, 0xde, 0x4b, 0x43, 0x8b, 0xc7, 0x3c, 0xf7, 0x4d, 0x97, 0x8f, 0x80, 0x38, 0xbe, 0x4f, 0xac, 0x92, 0xb3, 0x34, 0xae, 0x51, 0xeb, 0x95, 0x7e, 0x30, 0xbb, 0x54, 0x7c, 0x97, 0xdd, 0x2d, 0x2d, 0x47, 0xa3, 0x55, 0xf3, 0xa8, 0x3b, 0x49, 0x26, 0x57, 0x0d, 0x9f, 0xf1, 0x48, 0xd0, 0x58, 0x0c, 0x97, 0xb4, 0x48, 0xd5, 0x59, 0x29, 0x8f, 0x77, 0x47, 0xdf, 0x5b, 0x8c, 0x85, 0x43, 0x48, 0x10, 0x5f, 0x49, 0x7a, 0x65, 0x48, 0x39, 0x63, 0xea, 0x6f, 0x37, 0x49, 0x40, 0x69, 0x00, 0x65, 0x80, 0x4b, 0x4c, 0x6e, 0x94, 0x5c, 0xac, 0x4c, 0xce, 0x74, 0x17, 0x54, 0x0c, 0x4e, 0xc0, 0x79, 0x5f, 0x4c, 0x82, 0x51, 0x09, 0x7e, 0xbe, 0x46, 0x17, 0x52, 0xef, 0x83, 0x6b, 0x3f, 0xce, 0x54, 0xfd, 0x87, 0x6b, 0x3b, 0x44, 0x57, 0x20, 0x8b, 0x13, 0x36, 0xf2, 0x59, 0x51, 0x8e, 0x6f, 0x32, 0xc9, 0x5b, 0x8f, 0x91, 0x67, 0x2e, 0xc2, 0x53, 0xe9, 0x4d, 0xcf, 0xaf, 0x9b, 0x55, 0x00, 0x4e, 0xf5, 0xa7, 0x28, 0x56, 0x47, 0x50, 0x4b, 0x9e, 0xd9, 0x54, 0xeb, 0x51, 0x3e, 0x96, 0x81, 0x53, 0xc8, 0x52, 0x5e, 0x8d, 0xe7, 0x52, 0x71, 0x54, 0xcc, 0x83, 0x41, 0x52, 0x32, 0x58, 0xfd, 0x77, 0x9b, 0x53, 0x48, 0x5e, 0xe2, 0x6c, 0x04, 0x53, 0x9d, 0x63, 0xe2, 0x62, 0x10, 0x55, 0x50, 0x69, 0x88, 0x59, 0x96, 0x57, 0x73, 0x6f, 0x6f, 0x51, 0xb0, 0x59, 0x28, 0x74, 0xcb, 0x4a, 0x80, 0x5b, 0x1e, 0x7a, 0x05, 0x43, 0xd3, 0x5d, 0x55, 0x7f, 0x0b, 0x3e, 0x36, 0x5f, 0x21, 0x83, 0x20, 0x39, 0x9d, 0x60, 0xf1, 0x86, 0xbd, 0x35, 0x22, 0x62, 0xe1, 0x8a, 0x24, 0x30, 0x9f, 0x60, 0xa9, 0x46, 0x1b, 0xb6, 0xcb, 0x61, 0xf9, 0x47, 0x64, 0xae, 0xaa, 0x61, 0x99, 0x47, 0xef, 0xa6, 0xd9, 0x61, 0x77, 0x48, 0xb7, 0x9e, 0xd4, 0x60, 0x48, 0x4a, 0x10, 0x95, 0xc2, 0x5f, 0x7e, 0x4b, 0xfc, 0x8c, 0x42, 0x5f, 0x45, 0x4f, 0x3b, 
0x81, 0x88, 0x5d, 0x55, 0x53, 0xa6, 0x74, 0xf3, 0x5d, 0x47, 0x59, 0x6f, 0x69, 0x08, 0x5e, 0xec, 0x5f, 0xd7, 0x5e, 0xf4, 0x60, 0x02, 0x64, 0xe2, 0x56, 0xc1, 0x61, 0xaa, 0x6a, 0x52, 0x4f, 0x1d, 0x63, 0xcb, 0x70, 0x52, 0x48, 0x8e, 0x65, 0x68, 0x75, 0x63, 0x41, 0xbd, 0x67, 0x49, 0x7a, 0x40, 0x3c, 0x75, 0x69, 0x1e, 0x7e, 0xd4, 0x37, 0xaa, 0x6a, 0xc5, 0x82, 0xb0, 0x32, 0xd2, 0x6d, 0xf2, 0x40, 0x9d, 0xbc, 0x14, 0x6f, 0x09, 0x41, 0x75, 0xb4, 0xc1, 0x6f, 0x94, 0x42, 0x05, 0xad, 0x89, 0x6e, 0xf8, 0x42, 0x1f, 0xa6, 0x63, 0x6e, 0x37, 0x42, 0x50, 0x9f, 0x02, 0x6c, 0x02, 0x43, 0x37, 0x95, 0x8a, 0x6a, 0x04, 0x44, 0xbe, 0x8b, 0x3d, 0x68, 0x81, 0x47, 0x5e, 0x7f, 0xaf, 0x68, 0x32, 0x4e, 0x12, 0x72, 0x7e, 0x67, 0xf8, 0x54, 0x37, 0x66, 0x40, 0x68, 0xfa, 0x5a, 0x16, 0x5c, 0x7c, 0x6a, 0xc2, 0x60, 0x06, 0x54, 0x73, 0x6c, 0x3e, 0x65, 0x8b, 0x4d, 0x3b, 0x6e, 0x12, 0x6b, 0x73, 0x46, 0xaa, 0x70, 0x06, 0x70, 0xfd, 0x40, 0x00, 0x71, 0x83, 0x75, 0xc6, 0x3a, 0xc0, 0x73, 0x14, 0x7a, 0x56, 0x35, 0x7a, 0x7b, 0x0c, 0x3c, 0x47, 0xc0, 0x26, 0x7b, 0x68, 0x3c, 0x88, 0xb9, 0x73, 0x7b, 0xe4, 0x3c, 0xe8, 0xb2, 0xf5, 0x7b, 0xa7, 0x3d, 0x0b, 0xac, 0x37, 0x7a, 0x9e, 0x3c, 0xb9, 0xa5, 0x64, 0x79, 0x4b, 0x3c, 0x78, 0x9e, 0x18, 0x77, 0x1d, 0x3d, 0x0a, 0x94, 0xba, 0x75, 0x61, 0x3e, 0xf6, 0x89, 0xe3, 0x73, 0x91, 0x42, 0x24, 0x7d, 0x45, 0x72, 0x8f, 0x48, 0x1f, 0x6f, 0xd2, 0x73, 0x7b, 0x4f, 0x67, 0x63, 0xab, 0x73, 0xcc, 0x55, 0x0f, 0x5a, 0x35, 0x74, 0xe8, 0x5a, 0xce, 0x52, 0x22, 0x76, 0xc0, 0x61, 0x2f, 0x4b, 0x82, 0x78, 0x38, 0x66, 0xef, 0x45, 0x08, 0x79, 0xea, 0x6c, 0x5e, 0x3e, 0xae, 0x7b, 0xa5, 0x71, 0x8f, 0x38, 0xdb, 0x86, 0x2b, 0x39, 0x21, 0xc2, 0xa4, 0x86, 0x76, 0x39, 0x38, 0xbc, 0x70, 0x86, 0x73, 0x38, 0xe6, 0xb6, 0xbe, 0x86, 0x5c, 0x38, 0x75, 0xb1, 0x20, 0x85, 0x46, 0x37, 0xdd, 0xaa, 0xbc, 0x83, 0xe3, 0x37, 0x23, 0xa4, 0x1d, 0x82, 0x5b, 0x36, 0xaf, 0x9c, 0x8f, 0x80, 0xb2, 0x37, 0x11, 0x93, 0x37, 0x7f, 0x72, 0x39, 0x2c, 0x87, 0xf2, 0x7e, 0xaf, 0x3d, 0x67, 0x7b, 0x27, 0x7d, 0x63, 0x43, 0x34, 0x6d, 0x46, 0x7d, 
0xcb, 0x4a, 0x68, 0x60, 0xa0, 0x7e, 0x7a, 0x50, 0xf7, 0x57, 0xde, 0x7e, 0xd8, 0x56, 0x57, 0x4f, 0xdc, 0x80, 0xd8, 0x5d, 0x22, 0x49, 0xc3, 0x82, 0x78, 0x63, 0x11, 0x43, 0x79, 0x84, 0x08, 0x68, 0x8a, 0x3c, 0xf4, 0x90, 0x19, 0x36, 0xf1, 0xc4, 0x54, 0x90, 0x87, 0x37, 0x1d, 0xbe, 0x4f, 0x90, 0x85, 0x36, 0x8f, 0xb9, 0x2a, 0x90, 0x70, 0x35, 0xde, 0xb4, 0x1e, 0x90, 0x16, 0x35, 0x0c, 0xae, 0xfb, 0x8e, 0x85, 0x34, 0x55, 0xa8, 0x99, 0x8c, 0xc9, 0x33, 0x83, 0xa2, 0x27, 0x8b, 0x03, 0x33, 0x46, 0x9a, 0x1b, 0x89, 0x41, 0x33, 0x7a, 0x91, 0x0c, 0x88, 0x6c, 0x35, 0xdf, 0x85, 0x1b, 0x87, 0xd1, 0x39, 0xea, 0x78, 0x1d, 0x87, 0x95, 0x40, 0x6a, 0x6a, 0xd2, 0x87, 0x94, 0x47, 0x21, 0x5e, 0xb7, 0x88, 0x9b, 0x4e, 0x1e, 0x56, 0x4b, 0x89, 0x4f, 0x53, 0xc5, 0x4e, 0xe2, 0x8a, 0xe9, 0x59, 0xe2, 0x48, 0x5c, 0x8c, 0x81, 0x5f, 0xb4, 0x41, 0x94, 0x98, 0x52, 0x35, 0x14, 0xc5, 0xaa, 0x98, 0xe9, 0x35, 0x6c, 0xbf, 0xae, 0x98, 0xef, 0x34, 0xfb, 0xba, 0xd7, 0x98, 0xea, 0x34, 0x83, 0xb6, 0x09, 0x98, 0xd7, 0x33, 0xed, 0xb1, 0x53, 0x97, 0xe9, 0x33, 0x45, 0xab, 0xb0, 0x96, 0x83, 0x32, 0x7a, 0xa5, 0x9e, 0x94, 0xf8, 0x31, 0x96, 0x9f, 0x65, 0x93, 0x7a, 0x31, 0x82, 0x97, 0x14, 0x92, 0x0b, 0x31, 0xbb, 0x8e, 0x2c, 0x91, 0xc4, 0x34, 0x57, 0x82, 0x27, 0x91, 0x61, 0x38, 0x70, 0x75, 0x5f, 0x91, 0x7b, 0x3e, 0xe3, 0x68, 0xe7, 0x91, 0xa2, 0x45, 0x6e, 0x5d, 0x9e, 0x92, 0x7d, 0x4c, 0x17, 0x55, 0x45, 0x93, 0x7b, 0x51, 0xc4, 0x4d, 0xf8, 0x94, 0xd7, 0x57, 0x52, 0x46, 0xd6, 0xa0, 0x73, 0x33, 0x63, 0xc6, 0xe4, 0xa1, 0x08, 0x33, 0xc5, 0xc0, 0xff, 0xa1, 0x29, 0x33, 0x86, 0xbc, 0x3d, 0xa1, 0x31, 0x33, 0x33, 0xb7, 0xa5, 0xa1, 0x31, 0x32, 0xd3, 0xb3, 0x1a, 0xa0, 0xe5, 0x32, 0x71, 0xae, 0x3d, 0x9f, 0xd6, 0x31, 0xf7, 0xa8, 0x74, 0x9e, 0x76, 0x31, 0x36, 0xa2, 0x98, 0x9d, 0x0b, 0x30, 0xc2, 0x9b, 0xb6, 0x9b, 0xaf, 0x30, 0xb7, 0x93, 0xbc, 0x9a, 0xba, 0x31, 0xb2, 0x8a, 0x4f, 0x9a, 0x69, 0x34, 0x15, 0x7f, 0x42, 0x9a, 0x6a, 0x38, 0x1b, 0x73, 0x1b, 0x9a, 0xad, 0x3e, 0x7c, 0x67, 0x2f, 0x9a, 0xe3, 0x44, 0xcb, 0x5c, 0x85, 0x9b, 0xad, 0x4a, 0x99, 
0x54, 0x40, 0x9c, 0xe4, 0x50, 0x0a, 0x4c, 0xbc, 0xa8, 0x03, 0x31, 0xa6, 0xc7, 0xc7, 0xa8, 0x86, 0x32, 0x08, 0xc2, 0x29, 0xa8, 0xa8, 0x31, 0xe4, 0xbd, 0x4c, 0xa8, 0x92, 0x31, 0x72, 0xb8, 0xcf, 0xa8, 0x71, 0x30, 0xf6, 0xb4, 0x5c, 0xa8, 0x46, 0x30, 0x6f, 0xaf, 0xf8, 0xa7, 0x81, 0x30, 0x01, 0xaa, 0x9d, 0xa6, 0xb0, 0x2f, 0x62, 0xa5, 0x36, 0xa5, 0xca, 0x2e, 0xb9, 0x9f, 0xba, 0xa4, 0xcc, 0x2e, 0xef, 0x98, 0x32, 0xa3, 0xb8, 0x2f, 0x30, 0x90, 0x98, 0xa3, 0x1b, 0x31, 0x0e, 0x87, 0x01, 0xa2, 0xd1, 0x33, 0x86, 0x7c, 0x99, 0xa2, 0xfe, 0x37, 0x61, 0x71, 0x32, 0xa3, 0x24, 0x3d, 0x98, 0x65, 0xc4, 0xa3, 0x78, 0x43, 0xa8, 0x5b, 0x70, 0xa4, 0x44, 0x49, 0x14, 0x52, 0xd8, 0xaf, 0xb1, 0x2f, 0xef, 0xc8, 0xb5, 0xb0, 0x3c, 0x30, 0x76, 0xc3, 0x16, 0xb0, 0x6a, 0x30, 0x65, 0xbe, 0x36, 0xb0, 0x35, 0x2f, 0xc4, 0xb9, 0xcb, 0xb0, 0x00, 0x2e, 0xd6, 0xb5, 0x80, 0xaf, 0xd8, 0x2d, 0xd7, 0xb1, 0x39, 0xaf, 0x97, 0x2d, 0x19, 0xac, 0x6e, 0xaf, 0x58, 0x2c, 0x64, 0xa7, 0x6b, 0xaf, 0x23, 0x2b, 0x96, 0xa2, 0x5f, 0xae, 0x92, 0x2b, 0x5a, 0x9c, 0x51, 0xad, 0xb6, 0x2b, 0xbd, 0x94, 0xee, 0xac, 0x6d, 0x2c, 0xa4, 0x8d, 0x44, 0xab, 0x1d, 0x2f, 0x4e, 0x84, 0x31, 0xaa, 0xbc, 0x32, 0x18, 0x7a, 0x63, 0xaa, 0xf3, 0x36, 0x01, 0x6f, 0xa8, 0xab, 0x03, 0x3c, 0x03, 0x64, 0xa6, 0xab, 0x8e, 0x42, 0x18, 0x5a, 0x35, 0x0e, 0xed, 0x8d, 0xe7, 0x88, 0x64, 0x11, 0x77, 0x90, 0x3a, 0x7f, 0xa1, 0x13, 0x65, 0x93, 0x22, 0x77, 0x12, 0x15, 0x32, 0x95, 0xfb, 0x6e, 0xc0, 0x17, 0x0b, 0x99, 0x55, 0x66, 0x24, 0x18, 0xee, 0x9c, 0xb7, 0x5d, 0xa2, 0x1b, 0x9c, 0xa0, 0x6d, 0x55, 0xb2, 0x1e, 0x0f, 0xa3, 0xd3, 0x4e, 0xb8, 0x20, 0x82, 0xa7, 0x08, 0x49, 0x21, 0x23, 0x42, 0xa9, 0x96, 0x43, 0x20, 0x25, 0xfb, 0xab, 0xab, 0x3e, 0x3c, 0x28, 0xe8, 0xad, 0x57, 0x3a, 0x52, 0x2b, 0xd1, 0xae, 0xf4, 0x36, 0x93, 0x2e, 0xfb, 0xb0, 0x43, 0x33, 0x12, 0x31, 0xcc, 0xb1, 0x80, 0x2f, 0x72, 0x34, 0x8e, 0xb2, 0xe4, 0x2b, 0x58, 0x37, 0x47, 0xb4, 0x17, 0x27, 0x9f, 0x18, 0x2d, 0x85, 0xc9, 0x8c, 0xc9, 0x1a, 0xb9, 0x87, 0x8a, 0x84, 0x15, 0x1c, 0x91, 0x8a, 0x3f, 0x7b, 0x3f, 0x1e, 
0x2a, 0x8d, 0x72, 0x72, 0x43, 0x20, 0x27, 0x91, 0x24, 0x69, 0x6c, 0x21, 0xbc, 0x94, 0x8e, 0x60, 0xa4, 0x23, 0xa1, 0x98, 0x7b, 0x58, 0x93, 0x25, 0xfa, 0x9c, 0x2f, 0x50, 0x65, 0x29, 0x4e, 0x9f, 0xe5, 0x49, 0xe2, 0x2b, 0xdc, 0xa3, 0x13, 0x43, 0xb9, 0x2e, 0x45, 0xa5, 0x9d, 0x3e, 0x9d, 0x30, 0xc6, 0xa7, 0x8c, 0x3b, 0x05, 0x33, 0x7e, 0xa9, 0x43, 0x37, 0x8f, 0x36, 0x5c, 0xaa, 0xc8, 0x34, 0x2c, 0x39, 0x53, 0xac, 0x1d, 0x30, 0xcc, 0x3c, 0x3c, 0xad, 0x65, 0x2d, 0x50, 0x3f, 0x11, 0xae, 0x92, 0x2a, 0x01, 0x20, 0x9f, 0x7e, 0x62, 0x91, 0x63, 0x23, 0x1c, 0x7f, 0x9e, 0x88, 0xc8, 0x25, 0xd0, 0x80, 0x80, 0x80, 0x80, 0x27, 0x01, 0x84, 0x7d, 0x77, 0x3d, 0x28, 0x5b, 0x88, 0x22, 0x6e, 0x10, 0x2a, 0x20, 0x8c, 0x4b, 0x64, 0xca, 0x2c, 0x23, 0x90, 0x6e, 0x5c, 0x0d, 0x2d, 0xe7, 0x94, 0x44, 0x53, 0xe8, 0x30, 0x43, 0x98, 0x16, 0x4c, 0xb8, 0x33, 0x37, 0x9b, 0xde, 0x46, 0x31, 0x36, 0x3c, 0x9f, 0x5a, 0x3f, 0xba, 0x38, 0x8d, 0xa1, 0xbb, 0x3c, 0x23, 0x3a, 0xfb, 0xa3, 0xd4, 0x38, 0xa3, 0x3d, 0x97, 0xa5, 0xb4, 0x35, 0x3b, 0x40, 0x51, 0xa7, 0x56, 0x31, 0xdc, 0x42, 0xee, 0xa8, 0xb1, 0x2e, 0x84, 0x45, 0x79, 0xa9, 0xe9, 0x2b, 0x34, 0x2a, 0x39, 0x76, 0x83, 0x96, 0xde, 0x2c, 0x65, 0x77, 0xa4, 0x8e, 0x2a, 0x2e, 0x41, 0x79, 0x1e, 0x85, 0xa8, 0x30, 0x15, 0x7b, 0x23, 0x7c, 0xdf, 0x31, 0x72, 0x7e, 0xce, 0x73, 0x63, 0x32, 0xd2, 0x83, 0x18, 0x6a, 0x01, 0x34, 0x5c, 0x87, 0x3f, 0x60, 0xad, 0x36, 0x25, 0x8b, 0xb5, 0x58, 0x48, 0x38, 0x45, 0x8f, 0xc1, 0x50, 0x1d, 0x3a, 0x9b, 0x93, 0xe1, 0x49, 0xbb, 0x3d, 0x16, 0x97, 0xc3, 0x42, 0xe2, 0x3f, 0x92, 0x9b, 0x0d, 0x3d, 0xd6, 0x42, 0x35, 0x9d, 0xf2, 0x3a, 0x10, 0x44, 0xcf, 0xa0, 0x75, 0x36, 0x7f, 0x47, 0x33, 0xa2, 0x54, 0x33, 0x15, 0x49, 0x94, 0xa3, 0xf9, 0x2f, 0xa3, 0x4c, 0x0f, 0xa5, 0x56, 0x2c, 0x57, 0x34, 0xb9, 0x6e, 0x4e, 0x9c, 0xa9, 0x36, 0xf7, 0x6f, 0x38, 0x94, 0x18, 0x38, 0xaf, 0x70, 0xc0, 0x8b, 0x77, 0x3a, 0x12, 0x72, 0xa6, 0x82, 0xdf, 0x3a, 0xf7, 0x75, 0x88, 0x79, 0x76, 0x3b, 0xe2, 0x79, 0x0b, 0x6f, 0xcb, 0x3d, 0x76, 0x7d, 0xb1, 0x66, 0x68, 0x3f, 0x1f, 0x82, 0x47, 
0x5d, 0x67, 0x40, 0xa1, 0x86, 0xd1, 0x55, 0x11, 0x42, 0x8e, 0x8b, 0x21, 0x4d, 0x97, 0x45, 0x0e, 0x8f, 0x94, 0x47, 0x23, 0x47, 0x3c, 0x93, 0x5f, 0x40, 0xac, 0x49, 0x66, 0x96, 0x86, 0x3c, 0x71, 0x4b, 0xb0, 0x99, 0x75, 0x38, 0x88, 0x4e, 0x0d, 0x9c, 0x2d, 0x34, 0xc3, 0x50, 0x75, 0x9e, 0xae, 0x31, 0x0c, 0x53, 0x07, 0xa0, 0x8a, 0x2d, 0x9b, 0x40, 0xfe, 0x65, 0xd6, 0xa3, 0x0d, 0x42, 0x8e, 0x66, 0xda, 0x9a, 0xd2, 0x43, 0xf1, 0x67, 0xf1, 0x92, 0xaa, 0x44, 0x94, 0x69, 0xbf, 0x89, 0xb1, 0x45, 0x67, 0x6b, 0xf4, 0x80, 0x8b, 0x46, 0x43, 0x6f, 0xd8, 0x76, 0x43, 0x46, 0xdf, 0x73, 0xae, 0x6c, 0xa5, 0x48, 0x0e, 0x78, 0x1e, 0x63, 0x73, 0x49, 0xb5, 0x7c, 0xf6, 0x5a, 0xb7, 0x4b, 0x7f, 0x81, 0xbb, 0x52, 0x6e, 0x4d, 0x06, 0x86, 0x5c, 0x4b, 0x79, 0x4e, 0xf4, 0x8a, 0xd1, 0x45, 0x05, 0x51, 0x30, 0x8e, 0xd6, 0x3f, 0x32, 0x53, 0x39, 0x92, 0x38, 0x3a, 0xf5, 0x55, 0x4a, 0x95, 0x2c, 0x36, 0xe2, 0x57, 0x6f, 0x97, 0xd5, 0x32, 0xf3, 0x59, 0xc2, 0x9a, 0x44, 0x2f, 0x18, 0x4c, 0x7c, 0x5d, 0x48, 0xa9, 0x9e, 0x4f, 0x5d, 0x5e, 0x88, 0xa1, 0x2c, 0x50, 0x6c, 0x5f, 0xb6, 0x99, 0x25, 0x51, 0x29, 0x60, 0xe6, 0x91, 0x35, 0x50, 0xbf, 0x63, 0x1d, 0x87, 0xab, 0x50, 0xae, 0x65, 0xe0, 0x7d, 0xd0, 0x50, 0xf1, 0x69, 0xe2, 0x73, 0x44, 0x51, 0xe5, 0x6e, 0x69, 0x69, 0xc8, 0x53, 0x0b, 0x72, 0xe7, 0x60, 0xde, 0x54, 0x34, 0x77, 0xf2, 0x58, 0x39, 0x55, 0xf6, 0x7c, 0xb4, 0x50, 0x03, 0x57, 0xbb, 0x81, 0xcb, 0x49, 0x7c, 0x59, 0x5e, 0x86, 0x49, 0x42, 0xdf, 0x5b, 0x42, 0x8a, 0x37, 0x3d, 0xab, 0x5d, 0x3e, 0x8d, 0xbd, 0x39, 0x57, 0x5f, 0x36, 0x90, 0xe4, 0x35, 0x18, 0x61, 0x27, 0x93, 0xb5, 0x30, 0xc0, 0x58, 0x38, 0x54, 0xf1, 0xb0, 0x83, 0x5a, 0x05, 0x56, 0x2d, 0xa8, 0x55, 0x5c, 0x2e, 0x57, 0x6d, 0xa0, 0x52, 0x5c, 0x11, 0x58, 0xc8, 0x97, 0xf4, 0x5c, 0x2d, 0x5a, 0x39, 0x8f, 0x92, 0x5b, 0xd2, 0x5d, 0x03, 0x85, 0x34, 0x5b, 0xc2, 0x60, 0xbc, 0x7a, 0x65, 0x5b, 0xa9, 0x64, 0xa2, 0x70, 0x01, 0x5c, 0x4d, 0x69, 0x4f, 0x66, 0xab, 0x5d, 0x93, 0x6d, 0xe9, 0x5e, 0x1b, 0x5e, 0xeb, 0x73, 0x31, 0x55, 0xec, 0x60, 0x6e, 0x78, 0x14, 0x4e, 0x22, 0x62, 
0x27, 0x7d, 0x27, 0x47, 0x78, 0x63, 0xdc, 0x81, 0xdd, 0x40, 0xe2, 0x65, 0x68, 0x85, 0xbf, 0x3c, 0x05, 0x67, 0x1e, 0x89, 0x4f, 0x37, 0x63, 0x68, 0xf7, 0x8c, 0xa6, 0x32, 0xb6, 0x64, 0xeb, 0x4d, 0x30, 0xb6, 0xf1, 0x67, 0x5b, 0x4e, 0xbb, 0xae, 0x4a, 0x68, 0xb1, 0x50, 0x02, 0xa6, 0x7e, 0x69, 0xc4, 0x51, 0x47, 0x9e, 0xdd, 0x68, 0xa8, 0x52, 0xa4, 0x96, 0x6b, 0x67, 0xd9, 0x54, 0x42, 0x8d, 0xa2, 0x66, 0xfe, 0x56, 0xcf, 0x83, 0x4d, 0x66, 0x38, 0x5a, 0xee, 0x77, 0xdb, 0x66, 0x65, 0x5f, 0xec, 0x6c, 0xc4, 0x66, 0xea, 0x64, 0x70, 0x63, 0x88, 0x67, 0xf7, 0x69, 0x2f, 0x5b, 0x3c, 0x69, 0x7c, 0x6e, 0x6b, 0x53, 0x7f, 0x6a, 0xee, 0x73, 0x84, 0x4c, 0x3b, 0x6c, 0x68, 0x78, 0x71, 0x45, 0x7d, 0x6e, 0x01, 0x7d, 0x2b, 0x3f, 0x2c, 0x6f, 0x8a, 0x81, 0x6f, 0x3a, 0x30, 0x71, 0x0d, 0x85, 0x1f, 0x35, 0x24, 0x71, 0x59, 0x47, 0x16, 0xbc, 0xe0, 0x73, 0x34, 0x48, 0x3c, 0xb4, 0xda, 0x74, 0x67, 0x49, 0x28, 0xad, 0x2b, 0x74, 0xbf, 0x49, 0xe3, 0xa5, 0xdf, 0x74, 0xf8, 0x4a, 0xce, 0x9e, 0x48, 0x74, 0x03, 0x4c, 0x3b, 0x95, 0x72, 0x73, 0x31, 0x4e, 0x0e, 0x8c, 0x19, 0x72, 0x54, 0x50, 0x6f, 0x81, 0xb9, 0x71, 0x12, 0x55, 0x51, 0x75, 0x9c, 0x70, 0xe3, 0x5a, 0x64, 0x6a, 0x5c, 0x71, 0x89, 0x5f, 0x4f, 0x60, 0xa0, 0x72, 0x98, 0x64, 0x66, 0x58, 0xc2, 0x73, 0xdd, 0x69, 0x68, 0x51, 0x18, 0x75, 0x65, 0x6e, 0xff, 0x4a, 0x66, 0x76, 0xb8, 0x73, 0xde, 0x43, 0xb9, 0x78, 0x16, 0x78, 0x78, 0x3d, 0x92, 0x79, 0x93, 0x7c, 0xf9, 0x37, 0xfd, 0x7f, 0x0c, 0x42, 0x1f, 0xc0, 0xc3, 0x7f, 0xd6, 0x42, 0xf4, 0xb9, 0xd0, 0x80, 0xa2, 0x43, 0xd2, 0xb2, 0xd5, 0x80, 0xc9, 0x44, 0x5d, 0xab, 0xcf, 0x80, 0x84, 0x44, 0xb2, 0xa4, 0xdc, 0x7f, 0xfb, 0x45, 0x3d, 0x9d, 0x59, 0x7e, 0x91, 0x46, 0x32, 0x94, 0x87, 0x7d, 0x56, 0x47, 0xdd, 0x8a, 0xaf, 0x7c, 0x51, 0x4a, 0x1d, 0x7f, 0xf8, 0x7b, 0xf2, 0x50, 0x13, 0x73, 0x55, 0x7b, 0xc8, 0x55, 0x73, 0x67, 0xcb, 0x7c, 0x3a, 0x5a, 0x5e, 0x5e, 0x37, 0x7d, 0x14, 0x5f, 0xc7, 0x56, 0x59, 0x7e, 0x1e, 0x64, 0xd9, 0x4e, 0xfe, 0x7f, 0x82, 0x6a, 0x64, 0x48, 0xa6, 0x80, 0xfb, 0x6f, 0x75, 0x42, 0x21, 0x82, 0x45, 0x74, 0x42, 
0x3b, 0xc1, 0x8a, 0x0f, 0x3e, 0x66, 0xc3, 0x42, 0x8a, 0xfe, 0x3e, 0xf3, 0xbc, 0xa8, 0x8b, 0x91, 0x3f, 0x62, 0xb6, 0x98, 0x8c, 0x4a, 0x40, 0x07, 0xb0, 0x65, 0x8b, 0x98, 0x40, 0x0f, 0xa9, 0xd0, 0x8a, 0xd2, 0x40, 0x10, 0xa3, 0x32, 0x89, 0xb5, 0x40, 0x6b, 0x9b, 0x9d, 0x88, 0x43, 0x41, 0x23, 0x93, 0x0b, 0x87, 0x50, 0x43, 0x06, 0x88, 0xa8, 0x86, 0x85, 0x45, 0xe3, 0x7d, 0x3d, 0x85, 0xfb, 0x4b, 0x21, 0x70, 0xb3, 0x86, 0x85, 0x51, 0x5b, 0x64, 0x99, 0x86, 0xa7, 0x56, 0x7f, 0x5b, 0xd2, 0x87, 0x2a, 0x5b, 0xa7, 0x54, 0x50, 0x88, 0x62, 0x61, 0x05, 0x4d, 0x4e, 0x89, 0xb0, 0x66, 0x74, 0x46, 0xc2, 0x8b, 0x01, 0x6b, 0x94, 0x3f, 0xe7, 0x93, 0x45, 0x3b, 0xb3, 0xc5, 0x53, 0x94, 0x37, 0x3c, 0x35, 0xbe, 0xb3, 0x94, 0x82, 0x3c, 0x35, 0xb9, 0x42, 0x94, 0xd0, 0x3c, 0x3e, 0xb3, 0xcb, 0x94, 0xc9, 0x3c, 0x36, 0xae, 0x1c, 0x93, 0xf0, 0x3c, 0x08, 0xa7, 0xbf, 0x93, 0x0c, 0x3b, 0xce, 0xa1, 0x5f, 0x91, 0xfe, 0x3c, 0x26, 0x99, 0x86, 0x90, 0xdc, 0x3c, 0xb6, 0x91, 0x32, 0x90, 0xd9, 0x3f, 0x28, 0x86, 0x1e, 0x90, 0x79, 0x42, 0xb3, 0x7a, 0x3b, 0x8f, 0xee, 0x47, 0xc5, 0x6e, 0x13, 0x90, 0x75, 0x4d, 0xc7, 0x61, 0x50, 0x90, 0xd1, 0x53, 0x79, 0x59, 0xca, 0x91, 0x47, 0x58, 0x82, 0x52, 0xc1, 0x92, 0x64, 0x5d, 0xb4, 0x4b, 0xa9, 0x93, 0xc0, 0x63, 0x06, 0x44, 0x74, 0x9b, 0x72, 0x39, 0x82, 0xc6, 0xcb, 0x9c, 0x99, 0x3a, 0x16, 0xc0, 0x36, 0x9c, 0xd7, 0x39, 0xfb, 0xbb, 0x29, 0x9d, 0x0f, 0x39, 0xe3, 0xb6, 0x21, 0x9d, 0x42, 0x39, 0xc5, 0xb1, 0x23, 0x9c, 0xb1, 0x39, 0x8e, 0xab, 0x55, 0x9b, 0xd0, 0x39, 0x4a, 0xa5, 0x38, 0x9a, 0xe0, 0x39, 0x0f, 0x9e, 0xdf, 0x99, 0xe1, 0x39, 0x66, 0x96, 0xf0, 0x98, 0xf1, 0x3a, 0x06, 0x8e, 0xa2, 0x99, 0x01, 0x3c, 0xec, 0x83, 0x4c, 0x99, 0x38, 0x40, 0xec, 0x77, 0x59, 0x98, 0xef, 0x45, 0xeb, 0x6b, 0xb4, 0x99, 0x3d, 0x4b, 0xb4, 0x5f, 0xb7, 0x99, 0xf6, 0x51, 0x3e, 0x58, 0x0d, 0x9a, 0x9d, 0x56, 0x0a, 0x51, 0x21, 0x9c, 0x03, 0x5b, 0x03, 0x49, 0x9f, 0xa3, 0x4c, 0x37, 0x76, 0xc8, 0x1d, 0xa4, 0x37, 0x37, 0xfb, 0xc1, 0xd3, 0xa4, 0x88, 0x37, 0xee, 0xbc, 0xb9, 0xa4, 0xad, 0x37, 0xc3, 0xb7, 0xea, 0xa4, 
0xcb, 0x37, 0x8f, 0xb3, 0x26, 0xa4, 0xa8, 0x37, 0x5a, 0xae, 0x15, 0xa3, 0xfc, 0x37, 0x23, 0xa8, 0x4a, 0xa3, 0x43, 0x36, 0xea, 0xa2, 0x78, 0xa2, 0x84, 0x36, 0xfc, 0x9b, 0xb3, 0xa1, 0xc3, 0x37, 0x58, 0x94, 0x1e, 0xa1, 0x3d, 0x38, 0x99, 0x8b, 0x48, 0xa1, 0x2b, 0x3b, 0x51, 0x80, 0x8a, 0xa1, 0xb6, 0x3f, 0x3b, 0x74, 0xe8, 0xa1, 0xa7, 0x44, 0x74, 0x69, 0xb4, 0xa1, 0xe2, 0x4a, 0x0e, 0x5e, 0x8b, 0xa2, 0x9d, 0x4f, 0x26, 0x56, 0x89, 0xa3, 0x87, 0x53, 0xe4, 0x4f, 0x22, 0xaa, 0xdd, 0x35, 0x8b, 0xc9, 0x11, 0xab, 0x91, 0x36, 0x06, 0xc3, 0x33, 0xab, 0xef, 0x36, 0x12, 0xbd, 0xff, 0xab, 0xfc, 0x35, 0xcd, 0xb9, 0x50, 0xab, 0xfe, 0x35, 0x79, 0xb4, 0xb1, 0xab, 0xfa, 0x35, 0x20, 0xb0, 0x19, 0xab, 0x77, 0x34, 0xef, 0xaa, 0xab, 0xaa, 0xe6, 0x34, 0xb9, 0xa5, 0x35, 0xaa, 0x48, 0x34, 0x7d, 0x9f, 0xaf, 0xa9, 0xb1, 0x34, 0xd6, 0x98, 0x5d, 0xa9, 0x16, 0x35, 0x33, 0x91, 0x0b, 0xa8, 0xc0, 0x36, 0xda, 0x87, 0xf9, 0xa8, 0xa1, 0x39, 0x25, 0x7e, 0x25, 0xa8, 0xeb, 0x3d, 0x4b, 0x73, 0x1a, 0xa9, 0x15, 0x42, 0x8c, 0x68, 0x1d, 0xa9, 0x84, 0x47, 0xf7, 0x5d, 0x83, 0xaa, 0x71, 0x4c, 0xfe, 0x54, 0xc6, 0xb3, 0x22, 0x33, 0x68, 0xca, 0x2b, 0xb3, 0x47, 0x34, 0x19, 0xc4, 0x82, 0xb3, 0xad, 0x34, 0x5f, 0xbf, 0x3d, 0xb3, 0xa4, 0x34, 0x02, 0xba, 0xaa, 0xb3, 0x89, 0x33, 0x97, 0xb6, 0x26, 0xb3, 0x71, 0x33, 0x1d, 0xb1, 0xad, 0xb3, 0x21, 0x32, 0xce, 0xac, 0xc8, 0xb2, 0xc5, 0x32, 0x93, 0xa7, 0xa6, 0xb2, 0x5b, 0x32, 0x58, 0xa2, 0x80, 0xb1, 0xd1, 0x32, 0x66, 0x9c, 0x6e, 0xb1, 0x73, 0x32, 0xba, 0x95, 0x55, 0xb0, 0xe9, 0x33, 0x59, 0x8d, 0xfc, 0xb0, 0x93, 0x35, 0x03, 0x85, 0x4f, 0xb0, 0x5d, 0x37, 0x70, 0x7b, 0xfb, 0xb0, 0x90, 0x3b, 0x8e, 0x71, 0x7f, 0xb0, 0xa9, 0x40, 0xb5, 0x66, 0xc4, 0xb1, 0x41, 0x45, 0xf7, 0x5c, 0x48, 0x15, 0x13, 0x94, 0x44, 0x8c, 0x12, 0x17, 0x79, 0x96, 0x25, 0x83, 0xc9, 0x19, 0x58, 0x98, 0x89, 0x7b, 0x84, 0x1a, 0xdf, 0x9b, 0x20, 0x73, 0x51, 0x1c, 0x5a, 0x9e, 0x00, 0x6a, 0xa6, 0x1d, 0x9c, 0xa0, 0xde, 0x61, 0x3c, 0x20, 0x01, 0xa4, 0x77, 0x59, 0xe3, 0x22, 0x07, 0xa7, 0xde, 0x52, 0x5a, 0x24, 0x76, 0xaa, 0xc9, 
0x4c, 0x22, 0x27, 0x02, 0xad, 0x41, 0x46, 0x03, 0x29, 0xbe, 0xaf, 0x35, 0x3f, 0xf6, 0x2c, 0xbd, 0xb0, 0xb8, 0x3c, 0x17, 0x2f, 0x24, 0xb2, 0x61, 0x38, 0x75, 0x32, 0x19, 0xb3, 0x7d, 0x34, 0xff, 0x35, 0x5a, 0xb4, 0x3d, 0x31, 0x8f, 0x38, 0x33, 0xb5, 0x49, 0x2d, 0xeb, 0x3b, 0x0c, 0xb6, 0x61, 0x2a, 0x6a, 0x1d, 0x9e, 0x8c, 0x70, 0x90, 0x49, 0x20, 0x0b, 0x8e, 0x49, 0x87, 0x7f, 0x22, 0x35, 0x90, 0x53, 0x7e, 0xdc, 0x23, 0xd9, 0x93, 0x03, 0x76, 0x77, 0x25, 0x5a, 0x95, 0xcd, 0x6e, 0x07, 0x26, 0xe1, 0x99, 0x09, 0x64, 0xf2, 0x28, 0xa1, 0x9c, 0x68, 0x5c, 0x27, 0x2b, 0x01, 0xa0, 0x02, 0x53, 0xc0, 0x2d, 0x4d, 0xa3, 0x6c, 0x4c, 0xe8, 0x2f, 0xc4, 0xa6, 0x78, 0x46, 0xa7, 0x32, 0x31, 0xa8, 0xfa, 0x40, 0x78, 0x34, 0xe3, 0xaa, 0xbe, 0x3c, 0xd4, 0x37, 0xc3, 0xac, 0x46, 0x39, 0x5e, 0x3a, 0xbf, 0xad, 0x96, 0x36, 0x06, 0x3d, 0xca, 0xae, 0xb3, 0x32, 0xbb, 0x40, 0xb8, 0xaf, 0xb4, 0x2f, 0x70, 0x43, 0x1c, 0xb0, 0xf8, 0x2c, 0x02, 0x25, 0xd7, 0x85, 0x55, 0x94, 0xea, 0x28, 0x87, 0x86, 0x72, 0x8c, 0x39, 0x2a, 0xf0, 0x87, 0xd1, 0x83, 0xcd, 0x2c, 0xa5, 0x8a, 0x62, 0x7a, 0xee, 0x2d, 0xf2, 0x8d, 0x7b, 0x71, 0xc7, 0x2f, 0xa5, 0x91, 0x21, 0x68, 0xa5, 0x31, 0x4f, 0x94, 0x62, 0x5f, 0xb5, 0x33, 0x2e, 0x98, 0x1e, 0x57, 0x97, 0x35, 0x4b, 0x9b, 0xa9, 0x4f, 0x73, 0x38, 0x1e, 0x9f, 0x5f, 0x48, 0xd8, 0x3a, 0x72, 0xa2, 0x6d, 0x42, 0x5b, 0x3c, 0xc9, 0xa4, 0xc1, 0x3d, 0xf0, 0x3f, 0x59, 0xa6, 0xb6, 0x3a, 0x73, 0x42, 0x04, 0xa8, 0x5e, 0x37, 0x1d, 0x44, 0xa8, 0xa9, 0xcf, 0x33, 0xca, 0x47, 0x33, 0xab, 0x17, 0x30, 0x61, 0x49, 0xb1, 0xac, 0x4b, 0x2d, 0x10, 0x2e, 0x6b, 0x7e, 0x09, 0x99, 0xd8, 0x30, 0xf1, 0x7e, 0xf5, 0x90, 0xf9, 0x33, 0xa6, 0x7f, 0xd0, 0x88, 0xae, 0x36, 0x71, 0x80, 0x80, 0x80, 0x80, 0x37, 0x81, 0x84, 0x63, 0x77, 0x1d, 0x38, 0xb7, 0x87, 0xee, 0x6d, 0xd9, 0x3a, 0x4a, 0x8b, 0xd8, 0x64, 0x9c, 0x3c, 0x12, 0x8f, 0xbe, 0x5b, 0xe4, 0x3d, 0xb7, 0x93, 0x84, 0x53, 0xb8, 0x3f, 0xc0, 0x97, 0x5a, 0x4c, 0x68, 0x42, 0x23, 0x9b, 0x00, 0x45, 0xda, 0x44, 0x9c, 0x9e, 0x2c, 0x3f, 0xab, 0x47, 0x16, 0xa0, 0xc9, 0x3c, 0x05, 0x49, 
0x6b, 0xa2, 0xdd, 0x38, 0x8b, 0x4b, 0xbe, 0xa4, 0xac, 0x35, 0x16, 0x4e, 0x05, 0xa6, 0x44, 0x31, 0x9b, 0x50, 0x68, 0xa7, 0xae, 0x2e, 0x22, 0x38, 0xda, 0x75, 0xbb, 0x9f, 0x72, 0x3b, 0x57, 0x76, 0x8a, 0x96, 0xe4, 0x3d, 0xc2, 0x77, 0x6e, 0x8e, 0x86, 0x3f, 0xbc, 0x78, 0xf8, 0x86, 0x02, 0x41, 0x83, 0x7b, 0x00, 0x7d, 0x22, 0x42, 0x8f, 0x7e, 0x79, 0x73, 0x86, 0x43, 0xd3, 0x82, 0x77, 0x6a, 0x44, 0x45, 0x2c, 0x86, 0x54, 0x61, 0x38, 0x46, 0xa4, 0x8a, 0x9d, 0x58, 0xda, 0x48, 0x60, 0x8e, 0x86, 0x50, 0xb9, 0x4a, 0x66, 0x92, 0xac, 0x4a, 0x3e, 0x4c, 0x7c, 0x96, 0x53, 0x43, 0xc0, 0x4e, 0xa4, 0x99, 0x79, 0x3e, 0x76, 0x50, 0xe4, 0x9c, 0x51, 0x3a, 0x8a, 0x53, 0x3f, 0x9e, 0xdf, 0x36, 0xc5, 0x55, 0x6f, 0xa0, 0xf9, 0x33, 0x20, 0x57, 0xa1, 0xa2, 0xb9, 0x2f, 0x6c, 0x44, 0x29, 0x6d, 0x4a, 0xa5, 0x3b, 0x46, 0xb5, 0x6d, 0xf9, 0x9c, 0xe0, 0x48, 0xc0, 0x6e, 0xff, 0x94, 0x85, 0x4a, 0x7f, 0x70, 0x93, 0x8b, 0xde, 0x4b, 0xe1, 0x72, 0x9f, 0x83, 0x32, 0x4c, 0xc5, 0x75, 0x69, 0x79, 0xe1, 0x4d, 0x88, 0x78, 0xb6, 0x70, 0x4a, 0x4e, 0xd1, 0x7c, 0xdc, 0x67, 0x4a, 0x50, 0x5f, 0x80, 0xd3, 0x5e, 0x8b, 0x51, 0x8f, 0x85, 0x65, 0x56, 0x3f, 0x53, 0x15, 0x89, 0x96, 0x4e, 0x95, 0x54, 0xf1, 0x8d, 0xea, 0x48, 0x3a, 0x56, 0xf6, 0x91, 0xc0, 0x41, 0xb8, 0x58, 0xd5, 0x94, 0xfa, 0x3d, 0x10, 0x5a, 0xcd, 0x97, 0xd0, 0x39, 0x02, 0x5c, 0xd3, 0x9a, 0x5b, 0x35, 0x11, 0x5e, 0xf4, 0x9c, 0xb3, 0x31, 0x16, 0x4f, 0xce, 0x64, 0xbb, 0xab, 0x81, 0x52, 0xcf, 0x65, 0xb0, 0xa3, 0x3d, 0x54, 0xa0, 0x66, 0xc4, 0x9b, 0x19, 0x55, 0xd8, 0x67, 0xfc, 0x92, 0xd6, 0x56, 0x92, 0x6a, 0x01, 0x89, 0xc3, 0x57, 0x78, 0x6c, 0x63, 0x80, 0x90, 0x58, 0x16, 0x6f, 0xec, 0x76, 0xbc, 0x58, 0xab, 0x73, 0x77, 0x6d, 0x72, 0x59, 0xa0, 0x77, 0x8a, 0x64, 0x9d, 0x5a, 0xeb, 0x7b, 0xe0, 0x5c, 0x14, 0x5c, 0xa2, 0x80, 0x6d, 0x53, 0xfd, 0x5d, 0xfc, 0x84, 0xde, 0x4c, 0xb7, 0x5f, 0x8d, 0x89, 0x3b, 0x46, 0x3c, 0x61, 0x60, 0x8d, 0x1b, 0x3f, 0xf7, 0x63, 0x30, 0x90, 0x7c, 0x3b, 0x8e, 0x64, 0xda, 0x93, 0x70, 0x37, 0x29, 0x66, 0xb6, 0x96, 0x30, 0x32, 0xa9, 0x5b, 0x61, 0x5b, 0xe5, 
0xb1, 0xf8, 0x5e, 0x05, 0x5d, 0x0c, 0xa9, 0xc4, 0x60, 0xeb, 0x5e, 0x40, 0xa1, 0xc0, 0x61, 0xe6, 0x5f, 0xc5, 0x99, 0x57, 0x62, 0x65, 0x61, 0x64, 0x90, 0xf0, 0x62, 0x58, 0x63, 0xf8, 0x87, 0x41, 0x62, 0x95, 0x66, 0xcd, 0x7d, 0x96, 0x62, 0xd2, 0x6a, 0x6f, 0x73, 0xb1, 0x63, 0xa1, 0x6e, 0x65, 0x6a, 0x98, 0x64, 0x95, 0x72, 0x52, 0x62, 0x01, 0x65, 0xab, 0x77, 0x01, 0x59, 0xb6, 0x67, 0x2e, 0x7b, 0x72, 0x51, 0xc9, 0x68, 0xcb, 0x80, 0x2f, 0x4a, 0xe7, 0x6a, 0x0e, 0x84, 0x95, 0x44, 0x47, 0x6b, 0x92, 0x88, 0x75, 0x3e, 0x6e, 0x6d, 0x31, 0x8b, 0xf5, 0x39, 0xac, 0x6e, 0xfb, 0x8f, 0x49, 0x34, 0xba, 0x68, 0x34, 0x54, 0x33, 0xb8, 0x06, 0x6b, 0x14, 0x55, 0xbc, 0xaf, 0x5a, 0x6c, 0xa7, 0x56, 0xdd, 0xa7, 0xb9, 0x6e, 0x6f, 0x57, 0xf6, 0xa0, 0x12, 0x6e, 0x11, 0x59, 0x7d, 0x97, 0x97, 0x6d, 0xf0, 0x5b, 0x2c, 0x8e, 0xef, 0x6d, 0x91, 0x5d, 0xec, 0x84, 0xd3, 0x6d, 0x6b, 0x61, 0x76, 0x7a, 0x71, 0x6d, 0x7f, 0x65, 0x33, 0x70, 0x6b, 0x6e, 0x30, 0x69, 0x58, 0x67, 0x90, 0x6f, 0x46, 0x6d, 0x5e, 0x5f, 0x2d, 0x70, 0x7e, 0x72, 0x4f, 0x57, 0x61, 0x71, 0xc1, 0x76, 0xa6, 0x4f, 0xb1, 0x73, 0x15, 0x7b, 0x6c, 0x49, 0x01, 0x74, 0x83, 0x7f, 0xf5, 0x42, 0x5f, 0x75, 0xc9, 0x83, 0xf1, 0x3c, 0xab, 0x77, 0x44, 0x87, 0x9e, 0x37, 0x50, 0x74, 0x76, 0x4d, 0x3b, 0xbd, 0xdd, 0x77, 0x17, 0x4e, 0x95, 0xb5, 0x1c, 0x78, 0xf1, 0x4f, 0xd6, 0xac, 0xec, 0x79, 0xbb, 0x50, 0xf9, 0xa5, 0xba, 0x7a, 0x50, 0x52, 0x26, 0x9e, 0x4c, 0x79, 0xae, 0x53, 0xa1, 0x95, 0xf5, 0x79, 0x25, 0x55, 0x50, 0x8d, 0x31, 0x78, 0x80, 0x57, 0xaa, 0x83, 0x36, 0x78, 0x12, 0x5b, 0xd4, 0x78, 0x0f, 0x78, 0x32, 0x60, 0x4f, 0x6d, 0x56, 0x78, 0xd9, 0x64, 0x5a, 0x64, 0xb0, 0x79, 0xc6, 0x68, 0xbc, 0x5c, 0x9f, 0x7a, 0xf7, 0x6d, 0x95, 0x55, 0x12, 0x7c, 0x39, 0x72, 0x32, 0x4d, 0xdb, 0x7d, 0x5b, 0x76, 0xc8, 0x47, 0x3a, 0x7e, 0x93, 0x7b, 0x1c, 0x40, 0x98, 0x7f, 0xff, 0x7f, 0x95, 0x3a, 0x8f, 0x81, 0x7f, 0x47, 0xd0, 0xc2, 0x1a, 0x82, 0xb1, 0x48, 0xd3, 0xba, 0x86, 0x83, 0xfa, 0x49, 0xd8, 0xb2, 0xfc, 0x84, 0x9b, 0x4a, 0xac, 0xab, 0xb1, 0x84, 0xde, 0x4b, 0x6d, 0xa4, 0x98, 0x84, 
0xde, 0x4c, 0x71, 0x9c, 0xf8, 0x84, 0x45, 0x4d, 0xd8, 0x94, 0x73, 0x83, 0xc2, 0x4f, 0x8d, 0x8b, 0x3e, 0x83, 0x1f, 0x51, 0x6e, 0x81, 0x74, 0x82, 0xbb, 0x56, 0x63, 0x75, 0xd2, 0x82, 0xe1, 0x5b, 0x38, 0x6b, 0x00, 0x83, 0xa6, 0x5f, 0x84, 0x61, 0xd9, 0x84, 0x56, 0x64, 0x3d, 0x5a, 0x2d, 0x85, 0x62, 0x68, 0xd7, 0x52, 0xce, 0x86, 0xbf, 0x6d, 0xc0, 0x4b, 0xf9, 0x87, 0xd0, 0x72, 0x9b, 0x45, 0x37, 0x88, 0xdc, 0x77, 0x6f, 0x3e, 0x30, 0x8c, 0xfb, 0x43, 0x7b, 0xc4, 0xd2, 0x8e, 0x1a, 0x44, 0x60, 0xbd, 0x8e, 0x8e, 0xd7, 0x45, 0x08, 0xb7, 0x28, 0x8f, 0xa6, 0x45, 0xc1, 0xb0, 0xab, 0x8f, 0x74, 0x46, 0x29, 0xa9, 0xed, 0x8f, 0x3a, 0x46, 0x97, 0xa3, 0x21, 0x8e, 0xaf, 0x47, 0x61, 0x9b, 0x80, 0x8d, 0xea, 0x48, 0x88, 0x93, 0x20, 0x8d, 0x71, 0x4a, 0x4f, 0x89, 0x64, 0x8d, 0x0c, 0x4c, 0x87, 0x7e, 0xf7, 0x8d, 0x39, 0x51, 0xa1, 0x73, 0x3c, 0x8d, 0x62, 0x56, 0xd2, 0x68, 0x42, 0x8e, 0x24, 0x5b, 0x67, 0x5f, 0x1e, 0x8e, 0xc3, 0x60, 0x3d, 0x57, 0xf4, 0x8f, 0xbb, 0x64, 0x92, 0x50, 0xdd, 0x90, 0xf9, 0x69, 0xb0, 0x49, 0xff, 0x92, 0x50, 0x6e, 0xfe, 0x42, 0x51, 0x95, 0xfc, 0x3f, 0xfa, 0xc8, 0x09, 0x97, 0xb8, 0x40, 0xf7, 0xbf, 0x62, 0x98, 0x30, 0x41, 0x53, 0xb9, 0xb2, 0x98, 0xae, 0x41, 0xbb, 0xb3, 0xfa, 0x98, 0xee, 0x42, 0x10, 0xae, 0x0f, 0x98, 0x8e, 0x42, 0x38, 0xa7, 0xa8, 0x98, 0x33, 0x42, 0x64, 0xa1, 0x3f, 0x97, 0x86, 0x43, 0x0c, 0x99, 0x96, 0x96, 0xce, 0x43, 0xe5, 0x91, 0x8f, 0x96, 0x86, 0x46, 0x52, 0x87, 0x18, 0x96, 0x5d, 0x49, 0x77, 0x7c, 0x14, 0x96, 0xb4, 0x4e, 0x01, 0x70, 0xba, 0x97, 0x15, 0x53, 0x6a, 0x65, 0x2b, 0x97, 0xa1, 0x58, 0x3e, 0x5c, 0xe9, 0x98, 0x5c, 0x5c, 0xe5, 0x55, 0xcb, 0x99, 0x81, 0x61, 0x35, 0x4e, 0x6a, 0x9b, 0x29, 0x66, 0xc5, 0x46, 0x50, 0x9e, 0x5b, 0x3d, 0xa8, 0xc8, 0x5d, 0xa0, 0x13, 0x3e, 0x55, 0xc1, 0x1d, 0xa0, 0x93, 0x3e, 0x7a, 0xbb, 0xa6, 0xa0, 0xec, 0x3e, 0x95, 0xb6, 0x6e, 0xa1, 0x47, 0x3e, 0xb1, 0xb1, 0x3a, 0xa1, 0x16, 0x3e, 0xc1, 0xab, 0x5c, 0xa0, 0xb9, 0x3e, 0xc9, 0xa5, 0x49, 0xa0, 0x58, 0x3e, 0xdb, 0x9f, 0x09, 0x9f, 0xd5, 0x3f, 0x50, 0x97, 0x87, 0x9f, 0x41, 0x3f, 0xd0, 
0x8f, 0xfc, 0x9f, 0x31, 0x43, 0x6e, 0x84, 0x88, 0x9f, 0x59, 0x47, 0x30, 0x79, 0x53, 0x9f, 0xc8, 0x4b, 0x89, 0x6e, 0x37, 0xa0, 0x47, 0x50, 0xcf, 0x61, 0xe8, 0xa0, 0xc2, 0x55, 0xa6, 0x5a, 0xf0, 0xa1, 0x93, 0x5a, 0x1c, 0x53, 0xd1, 0xa3, 0x11, 0x5e, 0x9a, 0x4b, 0x9e, 0xa5, 0xf4, 0x3b, 0x52, 0xc9, 0xb3, 0xa7, 0x1c, 0x3b, 0xd5, 0xc3, 0x25, 0xa7, 0xc9, 0x3c, 0x0c, 0xbd, 0x7b, 0xa8, 0x16, 0x3c, 0x08, 0xb8, 0x7d, 0xa8, 0x5f, 0x3c, 0x04, 0xb3, 0x86, 0xa8, 0x7a, 0x3c, 0x03, 0xae, 0x52, 0xa8, 0x14, 0x3c, 0x0d, 0xa8, 0x71, 0xa7, 0xae, 0x3c, 0x14, 0xa2, 0x8f, 0xa7, 0x47, 0x3c, 0x58, 0x9b, 0xe5, 0xa6, 0xe5, 0x3c, 0xd9, 0x94, 0x92, 0xa6, 0xa1, 0x3e, 0x05, 0x8c, 0x46, 0xa6, 0xab, 0x40, 0x88, 0x81, 0xf8, 0xa6, 0xfa, 0x44, 0x93, 0x77, 0x0f, 0xa7, 0x59, 0x49, 0x19, 0x6c, 0x29, 0xa7, 0xc1, 0x4e, 0x58, 0x60, 0x69, 0xa8, 0xac, 0x53, 0x15, 0x58, 0xdd, 0xaa, 0x05, 0x58, 0x30, 0x50, 0x15, 0xad, 0x8d, 0x39, 0x3c, 0xca, 0xa6, 0xae, 0x6a, 0x39, 0xb0, 0xc4, 0x96, 0xaf, 0x19, 0x39, 0xf4, 0xbe, 0xf0, 0xaf, 0x53, 0x39, 0xd8, 0xba, 0x1b, 0xaf, 0x87, 0x39, 0xb5, 0xb5, 0x51, 0xaf, 0xb5, 0x39, 0x90, 0xb0, 0x8a, 0xaf, 0x6f, 0x39, 0x8f, 0xab, 0x13, 0xaf, 0x17, 0x39, 0x95, 0xa5, 0x7b, 0xae, 0xba, 0x39, 0x9b, 0x9f, 0xdd, 0xae, 0x74, 0x3a, 0x18, 0x98, 0xc0, 0xae, 0x31, 0x3a, 0x9e, 0x91, 0xa7, 0xae, 0x08, 0x3c, 0x15, 0x89, 0x0b, 0xad, 0xff, 0x3d, 0xf1, 0x7f, 0xb4, 0xae, 0x5e, 0x42, 0x5c, 0x75, 0x13, 0xae, 0xd7, 0x46, 0xfd, 0x6a, 0x6d, 0xaf, 0x4f, 0x4c, 0x16, 0x5f, 0x77, 0xb0, 0x83, 0x50, 0xc8, 0x56, 0x51, 0xb5, 0xb4, 0x36, 0xe1, 0xcb, 0xef, 0xb6, 0x47, 0x37, 0x76, 0xc6, 0x48, 0xb6, 0xf8, 0x38, 0x16, 0xc0, 0xac, 0xb7, 0x27, 0x37, 0xf3, 0xbb, 0xe5, 0xb7, 0x4a, 0x37, 0xba, 0xb7, 0x39, 0xb7, 0x68, 0x37, 0x78, 0xb2, 0x93, 0xb7, 0x4f, 0x37, 0x50, 0xad, 0xa2, 0xb7, 0x05, 0x37, 0x48, 0xa8, 0x49, 0xb6, 0xb5, 0x37, 0x3e, 0xa2, 0xed, 0xb6, 0x64, 0x37, 0x64, 0x9c, 0xf2, 0xb6, 0x30, 0x37, 0xc5, 0x96, 0x0c, 0xb5, 0xef, 0x38, 0x45, 0x8f, 0x19, 0xb5, 0xdd, 0x39, 0xfe, 0x86, 0x76, 0xb5, 0xe8, 0x3c, 0x1f, 0x7d, 0x6e, 0xb6, 
0x03, 0x3f, 0xd9, 0x73, 0x3d, 0xb6, 0x59, 0x44, 0x7a, 0x68, 0xa6, 0xb7, 0x1b, 0x49, 0x8a, 0x5d, 0xc8, 0x1b, 0xc4, 0x9a, 0x3f, 0x90, 0x63, 0x1d, 0xe5, 0x9c, 0x39, 0x88, 0x2b, 0x1f, 0xaf, 0x9e, 0x45, 0x80, 0x1e, 0x20, 0xef, 0xa0, 0xa2, 0x77, 0xf1, 0x22, 0x0e, 0xa2, 0xf9, 0x6f, 0xdf, 0x23, 0x3f, 0xa5, 0xfa, 0x66, 0xb9, 0x24, 0x65, 0xa9, 0x3f, 0x5e, 0x35, 0x26, 0x52, 0xac, 0x63, 0x56, 0x80, 0x28, 0x58, 0xae, 0xeb, 0x4e, 0xf0, 0x2b, 0x00, 0xb0, 0xf5, 0x48, 0xd6, 0x2d, 0x2f, 0xb2, 0xe5, 0x42, 0x86, 0x30, 0x11, 0xb4, 0x34, 0x3e, 0x20, 0x31, 0x77, 0xb6, 0x66, 0x3a, 0x75, 0x34, 0xba, 0xb7, 0x29, 0x36, 0xe6, 0x38, 0xf8, 0xb7, 0x21, 0x33, 0x85, 0x3b, 0xeb, 0xb7, 0xca, 0x30, 0x15, 0x3e, 0xdf, 0xb8, 0xc5, 0x2c, 0xb3, 0x23, 0x72, 0x93, 0x30, 0x94, 0x5b, 0x26, 0x17, 0x94, 0x90, 0x8b, 0xc0, 0x28, 0x2a, 0x96, 0x49, 0x83, 0x80, 0x29, 0xce, 0x98, 0x7d, 0x7b, 0x27, 0x2b, 0x07, 0x9b, 0x12, 0x72, 0xc0, 0x2c, 0x4f, 0x9d, 0xe0, 0x69, 0x96, 0x2d, 0x90, 0xa0, 0x84, 0x5f, 0xd7, 0x2f, 0xaa, 0xa4, 0x19, 0x58, 0x0b, 0x31, 0x66, 0xa7, 0x39, 0x4f, 0xec, 0x33, 0xe9, 0xa9, 0xfc, 0x49, 0xd0, 0x36, 0x50, 0xac, 0x48, 0x43, 0x5d, 0x39, 0x00, 0xae, 0x03, 0x3e, 0x8c, 0x3c, 0x0b, 0xaf, 0x53, 0x3b, 0x1f, 0x3f, 0x0b, 0xb0, 0x72, 0x37, 0xd1, 0x41, 0xbb, 0xb1, 0x85, 0x34, 0x8f, 0x44, 0x3c, 0xb2, 0x7e, 0x31, 0x35, 0x46, 0xa3, 0xb3, 0x93, 0x2d, 0xc8, 0x2b, 0x25, 0x8c, 0x3c, 0x98, 0xe3, 0x2e, 0x18, 0x8d, 0x16, 0x8f, 0xb5, 0x30, 0x2f, 0x8e, 0xbf, 0x87, 0x2c, 0x32, 0x1d, 0x90, 0x9f, 0x7e, 0x99, 0x33, 0xb6, 0x93, 0x26, 0x76, 0x04, 0x35, 0x31, 0x95, 0xed, 0x6d, 0x5e, 0x36, 0xb3, 0x98, 0xf7, 0x64, 0x2a, 0x38, 0x5c, 0x9c, 0x3e, 0x5b, 0x61, 0x3a, 0x57, 0x9f, 0x95, 0x52, 0xcf, 0x3c, 0x85, 0xa2, 0xce, 0x4b, 0xf3, 0x3e, 0xc3, 0xa5, 0x98, 0x45, 0xa1, 0x41, 0x0c, 0xa7, 0xe2, 0x3f, 0xb2, 0x43, 0xbd, 0xa9, 0xa6, 0x3c, 0x58, 0x46, 0x64, 0xab, 0x2d, 0x38, 0xfd, 0x48, 0xfb, 0xac, 0x82, 0x35, 0xa2, 0x4b, 0x7a, 0xad, 0xad, 0x32, 0x3e, 0x4d, 0xf7, 0xae, 0xcd, 0x2e, 0xba, 0x33, 0x6c, 0x85, 0x0f, 0x9d, 0xa4, 0x36, 0x57, 0x85, 0xb6, 
0x94, 0xb2, 0x39, 0x16, 0x86, 0x8b, 0x8c, 0x28, 0x3b, 0x6d, 0x87, 0xe9, 0x83, 0xa6, 0x3d, 0x01, 0x8a, 0x70, 0x7a, 0xa6, 0x3e, 0x47, 0x8d, 0x53, 0x71, 0x77, 0x3f, 0xde, 0x90, 0xbd, 0x68, 0x6d, 0x41, 0x6c, 0x93, 0xe0, 0x5f, 0xad, 0x43, 0x0f, 0x97, 0x88, 0x57, 0x85, 0x44, 0xe1, 0x9a, 0xdf, 0x4f, 0x6d, 0x47, 0x3b, 0x9e, 0x58, 0x48, 0xf5, 0x49, 0x89, 0xa1, 0x39, 0x42, 0x81, 0x4b, 0xbd, 0xa3, 0x8e, 0x3e, 0x0c, 0x4e, 0x04, 0xa5, 0x8c, 0x3a, 0x84, 0x50, 0x48, 0xa7, 0x43, 0x37, 0x05, 0x52, 0x88, 0xa8, 0xc5, 0x33, 0x89, 0x54, 0xdb, 0xaa, 0x2f, 0x2f, 0xda, 0x3c, 0x15, 0x7d, 0x97, 0xa2, 0x3c, 0x3f, 0x19, 0x7e, 0x0d, 0x99, 0xad, 0x42, 0x10, 0x7e, 0x80, 0x91, 0x62, 0x44, 0xa7, 0x7f, 0x90, 0x88, 0xec, 0x47, 0x47, 0x80, 0x80, 0x80, 0x80, 0x48, 0x5c, 0x84, 0x2b, 0x77, 0x28, 0x49, 0x8d, 0x87, 0x72, 0x6d, 0xfe, 0x4a, 0xe0, 0x8b, 0x0c, 0x65, 0x00, 0x4c, 0x4e, 0x8e, 0xb7, 0x5c, 0x5c, 0x4d, 0xe9, 0x92, 0x6a, 0x54, 0x6b, 0x4f, 0xaf, 0x96, 0x0c, 0x4d, 0x3b, 0x51, 0xc0, 0x99, 0x8b, 0x46, 0xe2, 0x53, 0xf2, 0x9c, 0x90, 0x40, 0x77, 0x56, 0x29, 0x9f, 0x47, 0x3c, 0x78, 0x58, 0x36, 0xa1, 0x80, 0x38, 0xc7, 0x5a, 0x2d, 0xa3, 0x5f, 0x35, 0x27, 0x5c, 0x54, 0xa5, 0x2e, 0x31, 0x28, 0x47, 0x10, 0x75, 0x2b, 0xa7, 0xe4, 0x4a, 0x6a, 0x75, 0x77, 0x9f, 0x78, 0x4c, 0xdf, 0x76, 0x52, 0x97, 0x25, 0x4f, 0x47, 0x77, 0x43, 0x8e, 0xc3, 0x51, 0x16, 0x78, 0xf2, 0x86, 0x20, 0x52, 0xaf, 0x7a, 0xfa, 0x7d, 0x44, 0x53, 0x99, 0x7e, 0x3d, 0x73, 0xd9, 0x54, 0xca, 0x81, 0xd4, 0x6a, 0xe3, 0x55, 0xfd, 0x85, 0x5b, 0x62, 0x2a, 0x57, 0x52, 0x89, 0x60, 0x59, 0xe6, 0x58, 0xf5, 0x8d, 0x29, 0x51, 0xf3, 0x5a, 0xb6, 0x91, 0x2c, 0x4b, 0x48, 0x5c, 0x78, 0x94, 0xd6, 0x44, 0xe2, 0x5e, 0x51, 0x97, 0xfc, 0x3f, 0x12, 0x60, 0x32, 0x9a, 0xae, 0x3b, 0x08, 0x62, 0x25, 0x9d, 0x1c, 0x37, 0x06, 0x64, 0x42, 0x9f, 0x69, 0x32, 0xd8, 0x52, 0x72, 0x6c, 0x79, 0xad, 0xce, 0x55, 0xb0, 0x6d, 0x06, 0xa5, 0x70, 0x58, 0x70, 0x6d, 0xc7, 0x9d, 0x25, 0x5a, 0x40, 0x6e, 0xfd, 0x94, 0x8e, 0x5b, 0xd8, 0x70, 0xb5, 0x8b, 0xcb, 0x5d, 0x2b, 0x72, 0xc7, 0x83, 0x31, 0x5e, 
0x05, 0x75, 0x83, 0x7a, 0x22, 0x5e, 0xcb, 0x78, 0xad, 0x70, 0xec, 0x5f, 0xd6, 0x7c, 0x67, 0x68, 0x32, 0x61, 0x29, 0x80, 0x05, 0x5f, 0xa6, 0x62, 0x76, 0x84, 0x45, 0x57, 0xa2, 0x64, 0x00, 0x88, 0x1c, 0x4f, 0xcf, 0x65, 0x7e, 0x8c, 0x53, 0x49, 0x5b, 0x67, 0x46, 0x90, 0x0e, 0x43, 0x04, 0x68, 0xc8, 0x93, 0x4b, 0x3d, 0xb3, 0x6a, 0x61, 0x96, 0x2a, 0x39, 0x33, 0x6c, 0x42, 0x98, 0xf2, 0x34, 0x5d, 0x5e, 0x11, 0x63, 0x3f, 0xb3, 0xce, 0x61, 0x34, 0x64, 0x5a, 0xab, 0x80, 0x64, 0x4f, 0x65, 0x74, 0xa3, 0x69, 0x66, 0x07, 0x66, 0xc6, 0x9b, 0x14, 0x67, 0x0d, 0x68, 0x47, 0x92, 0x90, 0x67, 0xc8, 0x6a, 0x6f, 0x89, 0x83, 0x68, 0xaa, 0x6c, 0xba, 0x80, 0x84, 0x69, 0x45, 0x70, 0x2f, 0x77, 0x01, 0x69, 0xe8, 0x73, 0x86, 0x6e, 0x00, 0x6a, 0xe0, 0x77, 0x2d, 0x65, 0x84, 0x6c, 0x28, 0x7b, 0x0d, 0x5d, 0x47, 0x6d, 0xa9, 0x7f, 0x3a, 0x55, 0x8a, 0x6e, 0xee, 0x83, 0x42, 0x4e, 0x14, 0x70, 0x21, 0x87, 0x8a, 0x47, 0x79, 0x71, 0xa0, 0x8b, 0x4a, 0x41, 0x13, 0x73, 0x38, 0x8e, 0xb6, 0x3b, 0xf1, 0x74, 0xd1, 0x91, 0xec, 0x36, 0xa2, 0x6a, 0x6d, 0x5b, 0x38, 0xba, 0x78, 0x6e, 0x23, 0x5c, 0x9e, 0xb0, 0xd7, 0x70, 0x4a, 0x5d, 0x9b, 0xa8, 0xf3, 0x72, 0x50, 0x5e, 0x8b, 0xa1, 0x18, 0x72, 0xc5, 0x60, 0x1d, 0x98, 0xa9, 0x73, 0x33, 0x61, 0xd9, 0x90, 0x54, 0x73, 0x49, 0x64, 0x71, 0x86, 0xfe, 0x73, 0x9f, 0x67, 0x31, 0x7d, 0xaa, 0x74, 0x08, 0x6a, 0xd3, 0x73, 0xf0, 0x74, 0xe6, 0x6e, 0x91, 0x6b, 0x23, 0x75, 0xdb, 0x72, 0x30, 0x62, 0xdf, 0x76, 0xe6, 0x76, 0x4c, 0x5a, 0xed, 0x78, 0x36, 0x7a, 0x49, 0x53, 0x55, 0x79, 0x97, 0x7e, 0x69, 0x4c, 0x58, 0x7a, 0xab, 0x82, 0xbb, 0x45, 0xa6, 0x7b, 0xeb, 0x86, 0x94, 0x3f, 0x48, 0x7d, 0x72, 0x8a, 0x59, 0x39, 0x6e, 0x77, 0x08, 0x53, 0x9d, 0xbf, 0x75, 0x79, 0xd3, 0x55, 0x2a, 0xb6, 0xb0, 0x7c, 0x23, 0x56, 0x80, 0xae, 0x6d, 0x7d, 0x53, 0x57, 0x92, 0xa7, 0x01, 0x7e, 0x89, 0x58, 0xa6, 0x9f, 0x76, 0x7e, 0x71, 0x5a, 0x41, 0x97, 0x1b, 0x7e, 0x88, 0x5c, 0x07, 0x8e, 0x87, 0x7e, 0x6d, 0x5e, 0xb8, 0x84, 0xae, 0x7e, 0x79, 0x62, 0x0e, 0x7a, 0xa3, 0x7e, 0xbe, 0x65, 0xa7, 0x70, 0xbe, 0x7f, 0x96, 0x69, 0x7e, 
0x68, 0x5d, 0x80, 0xa0, 0x6d, 0x3b, 0x60, 0x4d, 0x81, 0xaf, 0x71, 0xaf, 0x58, 0xbb, 0x82, 0xe7, 0x75, 0x86, 0x51, 0x3b, 0x83, 0xf9, 0x79, 0xed, 0x4a, 0x6d, 0x85, 0x0d, 0x7e, 0x47, 0x43, 0xb6, 0x86, 0x4c, 0x82, 0xca, 0x3c, 0xc1, 0x83, 0x8d, 0x4d, 0x13, 0xc4, 0x8b, 0x85, 0x6e, 0x4e, 0x66, 0xbb, 0x79, 0x87, 0x33, 0x4f, 0x88, 0xb3, 0x6b, 0x88, 0x54, 0x50, 0xb0, 0xab, 0xf4, 0x89, 0x05, 0x51, 0xd5, 0xa4, 0xe3, 0x89, 0x79, 0x53, 0x18, 0x9d, 0x69, 0x89, 0x4a, 0x54, 0xa1, 0x95, 0x40, 0x89, 0x29, 0x56, 0x63, 0x8c, 0x88, 0x88, 0xf5, 0x58, 0xb8, 0x82, 0xc8, 0x88, 0xf8, 0x5c, 0xc4, 0x78, 0x00, 0x89, 0x44, 0x60, 0xe0, 0x6d, 0xba, 0x8a, 0x36, 0x64, 0xbe, 0x65, 0x74, 0x8b, 0x3c, 0x68, 0xc1, 0x5d, 0xa7, 0x8c, 0x77, 0x6c, 0xf8, 0x56, 0x75, 0x8e, 0x07, 0x70, 0xd8, 0x4f, 0x51, 0x8e, 0x8c, 0x75, 0xc2, 0x48, 0x5c, 0x8f, 0x63, 0x7b, 0x01, 0x40, 0x4e, 0x8e, 0xde, 0x48, 0x93, 0xc7, 0xe9, 0x90, 0xaa, 0x49, 0x9b, 0xbe, 0xc1, 0x91, 0x9a, 0x4a, 0x61, 0xb8, 0x02, 0x92, 0xa8, 0x4b, 0x40, 0xb1, 0x27, 0x93, 0x09, 0x4b, 0xfd, 0xaa, 0x3c, 0x93, 0x67, 0x4c, 0xca, 0xa3, 0x45, 0x93, 0x80, 0x4d, 0xe2, 0x9b, 0xa0, 0x93, 0x60, 0x4f, 0x48, 0x93, 0x64, 0x93, 0x31, 0x51, 0x08, 0x8a, 0x3e, 0x92, 0xf2, 0x53, 0x1c, 0x80, 0x81, 0x93, 0x2c, 0x57, 0xd4, 0x75, 0x97, 0x93, 0x9e, 0x5c, 0x4c, 0x6b, 0x58, 0x94, 0xa5, 0x60, 0x5f, 0x62, 0x90, 0x95, 0x79, 0x64, 0x96, 0x5b, 0x35, 0x96, 0xb4, 0x68, 0x9b, 0x54, 0x12, 0x98, 0x42, 0x6c, 0xc7, 0x4c, 0xe0, 0x99, 0x80, 0x72, 0x1d, 0x44, 0xbd, 0x97, 0xcc, 0x44, 0x79, 0xcb, 0xb3, 0x9a, 0x37, 0x45, 0xde, 0xc1, 0x0d, 0x9b, 0x0c, 0x46, 0x68, 0xba, 0xe6, 0x9b, 0xd2, 0x46, 0xf8, 0xb4, 0xdd, 0x9c, 0x7d, 0x47, 0x86, 0xae, 0xb7, 0x9c, 0x8d, 0x47, 0xfc, 0xa8, 0x1c, 0x9c, 0xb5, 0x48, 0x7f, 0xa1, 0x7b, 0x9c, 0x89, 0x49, 0x64, 0x99, 0xd6, 0x9c, 0x58, 0x4a, 0x75, 0x91, 0xdf, 0x9c, 0x48, 0x4c, 0xa1, 0x88, 0x02, 0x9c, 0x5b, 0x4f, 0x33, 0x7d, 0xb7, 0x9c, 0xd1, 0x53, 0xbc, 0x73, 0x0a, 0x9d, 0x53, 0x58, 0x87, 0x68, 0xd0, 0x9e, 0x4a, 0x5c, 0xa7, 0x5f, 0xe6, 0x9f, 0x43, 0x60, 0xed, 0x58, 0xb3, 0xa0, 
0x92, 0x64, 0xd1, 0x51, 0x87, 0xa2, 0x91, 0x6a, 0x2f, 0x48, 0x85, 0xa0, 0x6f, 0x41, 0x6a, 0xcc, 0x1b, 0xa2, 0x96, 0x42, 0x82, 0xc3, 0x03, 0xa3, 0x8b, 0x42, 0xfd, 0xbc, 0xdd, 0xa4, 0x16, 0x43, 0x50, 0xb7, 0x62, 0xa4, 0xa7, 0x43, 0xa8, 0xb1, 0xe4, 0xa4, 0xcb, 0x44, 0x00, 0xab, 0xdf, 0xa4, 0xbb, 0x44, 0x57, 0xa5, 0x94, 0xa4, 0xb2, 0x44, 0xc1, 0x9f, 0x20, 0xa4, 0x87, 0x45, 0x95, 0x97, 0x98, 0xa4, 0x68, 0x46, 0x7b, 0x90, 0x02, 0xa4, 0x7a, 0x49, 0x28, 0x85, 0xa2, 0xa4, 0xc0, 0x4c, 0x3f, 0x7b, 0x30, 0xa5, 0x5d, 0x4f, 0xef, 0x70, 0x89, 0xa5, 0xda, 0x55, 0x41, 0x66, 0x31, 0xa6, 0xc9, 0x59, 0xae, 0x5d, 0xbb, 0xa7, 0xf1, 0x5d, 0xfa, 0x56, 0x20, 0xa9, 0xeb, 0x62, 0x8f, 0x4d, 0x34, 0xa8, 0x7d, 0x3e, 0xff, 0xcb, 0x7e, 0xa9, 0xdb, 0x3f, 0x7d, 0xc4, 0xb2, 0xaa, 0xea, 0x3f, 0xd8, 0xbe, 0x80, 0xab, 0x67, 0x3f, 0xff, 0xb9, 0x5c, 0xab, 0xe9, 0x40, 0x2c, 0xb4, 0x3e, 0xac, 0x53, 0x40, 0x64, 0xae, 0xfa, 0xac, 0x21, 0x40, 0xb1, 0xa8, 0xef, 0xab, 0xf7, 0x41, 0x05, 0xa2, 0xde, 0xab, 0xdc, 0x41, 0x93, 0x9c, 0x39, 0xab, 0xd0, 0x42, 0x5f, 0x95, 0x03, 0xab, 0xda, 0x43, 0xa6, 0x8d, 0x0d, 0xac, 0x06, 0x45, 0xf3, 0x83, 0x3f, 0xac, 0x73, 0x49, 0x6a, 0x78, 0xf0, 0xad, 0x09, 0x4d, 0x3e, 0x6e, 0x61, 0xad, 0x99, 0x52, 0x73, 0x63, 0xb7, 0xae, 0xd3, 0x56, 0xf9, 0x5b, 0x9b, 0xb0, 0xc5, 0x5c, 0x89, 0x50, 0xf6, 0xb0, 0x31, 0x3c, 0xcd, 0xcc, 0x6e, 0xb1, 0x39, 0x3d, 0x2f, 0xc6, 0x51, 0xb2, 0x52, 0x3d, 0xa1, 0xc0, 0x59, 0xb2, 0xcb, 0x3d, 0xb3, 0xbb, 0x5f, 0xb3, 0x40, 0x3d, 0xc4, 0xb6, 0x70, 0xb3, 0xb4, 0x3d, 0xd4, 0xb1, 0x7f, 0xb3, 0xb5, 0x3d, 0xfc, 0xab, 0xf5, 0xb3, 0x86, 0x3e, 0x30, 0xa6, 0x22, 0xb3, 0x53, 0x3e, 0x69, 0xa0, 0x47, 0xb3, 0x42, 0x3e, 0xf4, 0x99, 0x68, 0xb3, 0x39, 0x3f, 0x8a, 0x92, 0x7e, 0xb3, 0x48, 0x40, 0xed, 0x8a, 0x3a, 0xb3, 0x81, 0x42, 0xf0, 0x81, 0x04, 0xb4, 0x03, 0x46, 0xd4, 0x76, 0xdf, 0xb4, 0x8f, 0x4a, 0xd4, 0x6c, 0x77, 0xb5, 0x0e, 0x4f, 0xd3, 0x61, 0x61, 0xb6, 0xc7, 0x54, 0x81, 0x58, 0x1e, 0xb8, 0x62, 0x3a, 0x23, 0xcd, 0xc5, 0xb9, 0x2f, 0x3a, 0xc4, 0xc8, 0x1f, 0xba, 0x32, 0x3b, 0x81, 
0xc2, 0x90, 0xba, 0xc4, 0x3b, 0xc3, 0xbd, 0x75, 0xbb, 0x18, 0x3b, 0xba, 0xb8, 0x99, 0xbb, 0x6b, 0x3b, 0xac, 0xb3, 0xbf, 0xbb, 0xa2, 0x3b, 0xa4, 0xae, 0xc2, 0xbb, 0x82, 0x3b, 0xc5, 0xa9, 0x2c, 0xbb, 0x6d, 0x3b, 0xe4, 0xa3, 0x97, 0xbb, 0x49, 0x3c, 0x20, 0x9d, 0xa1, 0xbb, 0x3c, 0x3c, 0x85, 0x96, 0xea, 0xbb, 0x2a, 0x3c, 0xf4, 0x90, 0x3c, 0xbb, 0x4a, 0x3e, 0x95, 0x87, 0xbc, 0xbb, 0x84, 0x40, 0x70, 0x7e, 0xe5, 0xbc, 0x0d, 0x43, 0xcf, 0x75, 0x60, 0xbc, 0x1c, 0x48, 0x3d, 0x6a, 0xa8, 0xbc, 0xe8, 0x4d, 0x55, 0x5f, 0x36, 0x22, 0xb7, 0xa0, 0xa5, 0x95, 0x0b, 0x24, 0xed, 0xa2, 0x10, 0x8c, 0xce, 0x26, 0x6d, 0xa3, 0xdb, 0x84, 0xe2, 0x27, 0x9d, 0xa5, 0xe4, 0x7c, 0xd7, 0x28, 0x7b, 0xa8, 0x61, 0x74, 0x9a, 0x29, 0x5c, 0xab, 0x07, 0x6c, 0x08, 0x2a, 0x3e, 0xad, 0xe7, 0x63, 0x23, 0x2b, 0xb5, 0xb0, 0x88, 0x5a, 0xd4, 0x2c, 0x8b, 0xb3, 0x0f, 0x52, 0x73, 0x2e, 0x72, 0xb5, 0x1b, 0x4c, 0x0e, 0x30, 0x2d, 0xb7, 0x24, 0x46, 0x2a, 0x33, 0x95, 0xb7, 0xd4, 0x40, 0x72, 0x34, 0x9e, 0xba, 0x19, 0x3c, 0x91, 0x39, 0x44, 0xb9, 0xc9, 0x38, 0xf3, 0x3c, 0xa9, 0xba, 0x29, 0x35, 0x72, 0x3f, 0xa6, 0xba, 0xa7, 0x32, 0x09, 0x42, 0x58, 0xbb, 0x5f, 0x2e, 0x84, 0x2a, 0x24, 0x99, 0xc0, 0x99, 0x27, 0x2c, 0xdc, 0x9a, 0xb0, 0x90, 0x45, 0x2e, 0xa8, 0x9c, 0x45, 0x88, 0x0d, 0x30, 0x4e, 0x9d, 0xcb, 0x7f, 0xd3, 0x31, 0x4d, 0xa0, 0x84, 0x77, 0x59, 0x32, 0x3a, 0xa3, 0x2c, 0x6e, 0xf2, 0x33, 0x74, 0xa5, 0xc3, 0x65, 0x8e, 0x34, 0xda, 0xa8, 0x98, 0x5c, 0xc7, 0x36, 0x57, 0xab, 0x67, 0x54, 0x3f, 0x38, 0x49, 0xad, 0xd3, 0x4c, 0xe5, 0x3a, 0xbf, 0xaf, 0xd2, 0x46, 0x93, 0x3c, 0xfc, 0xb1, 0x72, 0x40, 0x76, 0x3f, 0xdb, 0xb2, 0x9f, 0x3c, 0xf5, 0x42, 0x99, 0xb3, 0xb0, 0x39, 0xb4, 0x45, 0x3c, 0xb4, 0x9e, 0x36, 0x67, 0x47, 0xb9, 0xb5, 0x73, 0x33, 0x04, 0x4a, 0x1e, 0xb6, 0x39, 0x2f, 0x86, 0x30, 0xe9, 0x93, 0x0b, 0x9d, 0xfb, 0x34, 0x1b, 0x93, 0xbd, 0x94, 0x22, 0x36, 0x90, 0x94, 0xe6, 0x8b, 0x86, 0x38, 0x7a, 0x96, 0x93, 0x83, 0x21, 0x39, 0xfc, 0x98, 0xd5, 0x7a, 0xa4, 0x3b, 0x14, 0x9b, 0x73, 0x72, 0x30, 0x3c, 0x43, 0x9e, 0x2d, 0x68, 0xed, 0x3d, 
0x7b, 0xa0, 0xc1, 0x5f, 0x58, 0x3f, 0x64, 0xa3, 0xda, 0x57, 0x3b, 0x41, 0x17, 0xa6, 0x8c, 0x4f, 0x2d, 0x43, 0x65, 0xa9, 0x19, 0x49, 0x07, 0x45, 0xa0, 0xab, 0x38, 0x42, 0xa4, 0x48, 0x26, 0xac, 0xe5, 0x3e, 0x2d, 0x4a, 0xc8, 0xae, 0x47, 0x3a, 0xca, 0x4d, 0x54, 0xaf, 0x78, 0x37, 0x69, 0x4f, 0xa8, 0xb0, 0x81, 0x34, 0x05, 0x51, 0xde, 0xb1, 0x85, 0x30, 0x84, 0x38, 0x5b, 0x8c, 0x48, 0xa1, 0x69, 0x3b, 0x93, 0x8c, 0xb3, 0x98, 0x6e, 0x3e, 0xc1, 0x8d, 0x16, 0x8f, 0xbc, 0x40, 0xa7, 0x8e, 0xd6, 0x86, 0xef, 0x42, 0x75, 0x90, 0xcf, 0x7e, 0x2d, 0x44, 0x19, 0x93, 0x36, 0x75, 0xa8, 0x45, 0x87, 0x95, 0xdf, 0x6d, 0x1c, 0x46, 0xd7, 0x98, 0xc4, 0x64, 0x0d, 0x48, 0x4f, 0x9b, 0xdd, 0x5b, 0x5f, 0x4a, 0x19, 0x9e, 0xf4, 0x53, 0x0b, 0x4c, 0x29, 0xa1, 0xee, 0x4c, 0x33, 0x4e, 0x4a, 0xa4, 0x85, 0x46, 0x0f, 0x50, 0x66, 0xa6, 0xb0, 0x3f, 0xf6, 0x52, 0xa3, 0xa8, 0x91, 0x3c, 0x70, 0x54, 0xdd, 0xaa, 0x2c, 0x38, 0xef, 0x57, 0x0b, 0xab, 0x99, 0x35, 0x70, 0x59, 0x51, 0xac, 0xf6, 0x31, 0xab, 0x40, 0x47, 0x85, 0x27, 0xa5, 0xc6, 0x44, 0x09, 0x85, 0x4e, 0x9d, 0x1c, 0x47, 0x4b, 0x85, 0xa7, 0x94, 0xb7, 0x4a, 0x25, 0x86, 0x75, 0x8c, 0x3d, 0x4c, 0x63, 0x87, 0xea, 0x83, 0xa7, 0x4d, 0xf1, 0x8a, 0x4b, 0x7a, 0xb6, 0x4f, 0x33, 0x8c, 0xe4, 0x71, 0x9f, 0x50, 0x72, 0x90, 0x0f, 0x68, 0xa0, 0x51, 0xa4, 0x93, 0x19, 0x5f, 0xf3, 0x53, 0x49, 0x96, 0xa0, 0x58, 0x29, 0x55, 0x02, 0x99, 0xce, 0x50, 0x3e, 0x57, 0x09, 0x9d, 0x20, 0x49, 0xdd, 0x59, 0x2f, 0x9f, 0xfe, 0x43, 0x5d, 0x5b, 0x11, 0xa2, 0x54, 0x3e, 0x6f, 0x5c, 0xf8, 0xa4, 0x59, 0x3a, 0xc6, 0x5e, 0xdb, 0xa6, 0x21, 0x37, 0x1f, 0x60, 0xf5, 0xa7, 0xe1, 0x33, 0x01, 0x49, 0xd9, 0x7d, 0x6b, 0xaa, 0xda, 0x4d, 0xa2, 0x7d, 0x85, 0xa2, 0x48, 0x50, 0xac, 0x7d, 0xf3, 0x99, 0xe8, 0x53, 0x9a, 0x7e, 0x68, 0x91, 0x79, 0x55, 0xf0, 0x7f, 0x82, 0x88, 0xf2, 0x58, 0x17, 0x80, 0x80, 0x80, 0x80, 0x59, 0x3b, 0x83, 0xfa, 0x77, 0x65, 0x5a, 0x59, 0x87, 0x0a, 0x6e, 0x74, 0x5b, 0x87, 0x8a, 0x47, 0x65, 0xc9, 0x5c, 0xdf, 0x8d, 0x86, 0x5d, 0x60, 0x5e, 0x8f, 0x91, 0x2d, 0x55, 0x92, 0x60, 0x31, 0x94, 0xbb, 
0x4e, 0x3f, 0x61, 0xda, 0x98, 0x39, 0x47, 0xe1, 0x63, 0xa8, 0x9b, 0x34, 0x41, 0x63, 0x65, 0x7f, 0x9d, 0xc3, 0x3c, 0xf0, 0x67, 0x66, 0xa0, 0x14, 0x38, 0xd7, 0x69, 0x2a, 0xa2, 0x13, 0x34, 0xb0, 0x55, 0x4a, 0x74, 0x96, 0xb0, 0x6d, 0x58, 0xc4, 0x74, 0xef, 0xa7, 0xf5, 0x5c, 0x36, 0x75, 0x4d, 0x9f, 0xbf, 0x5e, 0x8b, 0x76, 0x46, 0x97, 0x2f, 0x60, 0xc7, 0x77, 0x4e, 0x8e, 0x99, 0x62, 0x35, 0x79, 0x03, 0x86, 0x12, 0x63, 0x76, 0x7b, 0x04, 0x7d, 0x64, 0x64, 0x76, 0x7e, 0x1d, 0x74, 0x5b, 0x65, 0xa5, 0x81, 0x6f, 0x6b, 0xa7, 0x66, 0xc8, 0x84, 0xc1, 0x63, 0x2b, 0x68, 0x12, 0x88, 0x5b, 0x5b, 0x0f, 0x69, 0xaa, 0x8b, 0xf4, 0x53, 0x3c, 0x6b, 0x51, 0x8f, 0xa7, 0x4c, 0x4c, 0x6c, 0xb6, 0x93, 0x3e, 0x46, 0x05, 0x6e, 0x3a, 0x96, 0x4d, 0x3f, 0xe5, 0x6f, 0xc7, 0x99, 0x13, 0x3b, 0x4a, 0x71, 0x8d, 0x9b, 0x9f, 0x36, 0x7f, 0x60, 0x91, 0x6b, 0x72, 0xb6, 0x0f, 0x64, 0x1e, 0x6c, 0x36, 0xad, 0x7c, 0x67, 0x5a, 0x6c, 0xe4, 0xa5, 0x4e, 0x69, 0xdf, 0x6d, 0xcb, 0x9c, 0xf5, 0x6b, 0x82, 0x6f, 0x23, 0x94, 0x5e, 0x6c, 0xf1, 0x70, 0xde, 0x8b, 0xbf, 0x6d, 0xfc, 0x72, 0xdd, 0x83, 0x35, 0x6e, 0xbc, 0x75, 0x98, 0x7a, 0x51, 0x6f, 0x9f, 0x78, 0xb3, 0x71, 0x61, 0x70, 0xd1, 0x7c, 0x1b, 0x68, 0xf6, 0x72, 0x56, 0x7f, 0x7a, 0x60, 0xb1, 0x73, 0x6a, 0x83, 0x47, 0x58, 0xe4, 0x74, 0xb9, 0x86, 0xc8, 0x51, 0x23, 0x75, 0xf5, 0x8a, 0xc4, 0x4a, 0x65, 0x77, 0x77, 0x8e, 0x5f, 0x44, 0x18, 0x78, 0xf8, 0x91, 0x95, 0x3e, 0x47, 0x7a, 0x5f, 0x94, 0x96, 0x38, 0xd8, 0x6d, 0x12, 0x62, 0xbe, 0xbb, 0xec, 0x70, 0xaf, 0x63, 0xe3, 0xb2, 0xff, 0x73, 0x4f, 0x64, 0xce, 0xaa, 0xce, 0x75, 0x9d, 0x65, 0xaf, 0xa2, 0xd4, 0x76, 0xe9, 0x67, 0x0a, 0x9a, 0x96, 0x77, 0xe9, 0x68, 0x96, 0x92, 0x31, 0x78, 0x9b, 0x6a, 0xc0, 0x89, 0x56, 0x79, 0x66, 0x6d, 0x04, 0x80, 0x8f, 0x7a, 0x1b, 0x70, 0x6c, 0x77, 0x3f, 0x7a, 0xcf, 0x73, 0xaa, 0x6e, 0x6f, 0x7b, 0xc4, 0x77, 0x1b, 0x66, 0x43, 0x7d, 0x00, 0x7a, 0x99, 0x5e, 0x40, 0x7e, 0x65, 0x7e, 0x40, 0x56, 0xcd, 0x7f, 0xcd, 0x81, 0x99, 0x4f, 0x70, 0x80, 0x9d, 0x85, 0xde, 0x48, 0xa3, 0x81, 0xdc, 0x89, 0x9e, 0x42, 0x2c, 0x83, 
0x61, 0x8d, 0x3a, 0x3c, 0x04, 0x79, 0x4b, 0x5a, 0x87, 0xc1, 0x69, 0x7c, 0x3a, 0x5b, 0xd8, 0xb8, 0xa4, 0x7f, 0x23, 0x5d, 0x1a, 0xb0, 0x33, 0x80, 0xc7, 0x5e, 0x1d, 0xa8, 0x71, 0x82, 0x7b, 0x5f, 0x24, 0xa0, 0x8e, 0x82, 0xff, 0x60, 0xd0, 0x98, 0x52, 0x83, 0xa3, 0x62, 0x7e, 0x90, 0x22, 0x83, 0xe1, 0x65, 0x06, 0x86, 0xe9, 0x84, 0x53, 0x67, 0xb2, 0x7d, 0xc3, 0x84, 0xe3, 0x6b, 0x28, 0x74, 0x43, 0x85, 0xda, 0x6e, 0xb8, 0x6b, 0xb8, 0x86, 0xd1, 0x72, 0x36, 0x63, 0xb8, 0x87, 0xe7, 0x75, 0xd4, 0x5c, 0x01, 0x89, 0x3f, 0x79, 0x79, 0x54, 0xa5, 0x8a, 0x8c, 0x7d, 0x52, 0x4d, 0x8a, 0x8b, 0x4f, 0x81, 0xa2, 0x46, 0xb3, 0x8c, 0x31, 0x85, 0xb8, 0x3f, 0xa7, 0x85, 0xa1, 0x52, 0xfb, 0xc6, 0x4a, 0x87, 0xfd, 0x54, 0xaa, 0xbd, 0x14, 0x8a, 0x06, 0x56, 0x03, 0xb5, 0x32, 0x8b, 0xb7, 0x57, 0x42, 0xad, 0x96, 0x8c, 0xc3, 0x58, 0x5e, 0xa6, 0x25, 0x8d, 0xba, 0x59, 0x90, 0x9e, 0x76, 0x8e, 0x16, 0x5b, 0x20, 0x96, 0x62, 0x8e, 0x90, 0x5c, 0xe0, 0x8e, 0x02, 0x8e, 0xcb, 0x5f, 0x94, 0x84, 0x50, 0x8f, 0x00, 0x62, 0xb5, 0x7a, 0xac, 0x8f, 0x65, 0x66, 0x1c, 0x71, 0x15, 0x90, 0x71, 0x69, 0xd3, 0x68, 0xe4, 0x91, 0xa9, 0x6d, 0x6a, 0x61, 0x04, 0x92, 0xff, 0x71, 0x21, 0x59, 0xc2, 0x94, 0x5b, 0x74, 0xa7, 0x52, 0x72, 0x95, 0x3d, 0x79, 0x07, 0x4b, 0x3c, 0x96, 0x1f, 0x7d, 0xbe, 0x43, 0x99, 0x90, 0xf7, 0x4d, 0x98, 0xc9, 0xc4, 0x93, 0x0d, 0x4e, 0x8e, 0xc0, 0x61, 0x94, 0x5d, 0x4f, 0x79, 0xb9, 0x3c, 0x95, 0xd6, 0x50, 0x9d, 0xb2, 0x0f, 0x96, 0xcc, 0x51, 0xce, 0xab, 0x0c, 0x97, 0x98, 0x52, 0xef, 0xa3, 0xf4, 0x98, 0x1c, 0x54, 0x35, 0x9c, 0x67, 0x98, 0x4f, 0x55, 0xae, 0x94, 0x6e, 0x98, 0x84, 0x57, 0x88, 0x8b, 0xc2, 0x98, 0xae, 0x5a, 0x01, 0x82, 0x32, 0x99, 0x12, 0x5d, 0xca, 0x77, 0xea, 0x99, 0x9c, 0x61, 0x85, 0x6e, 0x2b, 0x9a, 0xa1, 0x65, 0x40, 0x66, 0x23, 0x9b, 0xdb, 0x68, 0xdb, 0x5e, 0x7c, 0x9d, 0x6e, 0x6c, 0x82, 0x57, 0x4d, 0x9f, 0x8e, 0x6f, 0xa2, 0x50, 0x02, 0xa0, 0x3c, 0x74, 0xfc, 0x47, 0xf5, 0x9a, 0x39, 0x49, 0x1d, 0xcd, 0x4e, 0x9c, 0x9f, 0x4a, 0x59, 0xc3, 0x75, 0x9e, 0x0c, 0x4b, 0x2d, 0xbc, 0x79, 0x9f, 0x21, 0x4b, 0xeb, 
0xb6, 0x1d, 0xa0, 0x3e, 0x4c, 0xb8, 0xaf, 0xac, 0xa0, 0xb6, 0x4d, 0x71, 0xa8, 0xd3, 0xa1, 0x4e, 0x4e, 0x3c, 0xa1, 0xe7, 0xa1, 0x9a, 0x4f, 0x50, 0x9a, 0x3a, 0xa1, 0xd0, 0x50, 0x92, 0x92, 0x4c, 0xa1, 0xdc, 0x52, 0xa2, 0x89, 0x37, 0xa1, 0xfe, 0x55, 0x15, 0x7f, 0xb8, 0xa2, 0x7e, 0x59, 0x4f, 0x75, 0x79, 0xa3, 0x3c, 0x5d, 0x5e, 0x6b, 0xbc, 0xa4, 0x52, 0x61, 0x24, 0x63, 0x52, 0xa5, 0x72, 0x65, 0x13, 0x5b, 0xcd, 0xa7, 0x22, 0x68, 0xec, 0x54, 0x5c, 0xa9, 0x9c, 0x6d, 0x0e, 0x4c, 0x13, 0xa3, 0x09, 0x45, 0xdf, 0xcd, 0xa8, 0xa4, 0xfb, 0x46, 0xd3, 0xc5, 0x8c, 0xa6, 0x7b, 0x47, 0x90, 0xbe, 0xa4, 0xa7, 0x45, 0x48, 0x11, 0xb8, 0xd0, 0xa8, 0x1f, 0x48, 0x9f, 0xb2, 0xf1, 0xa8, 0xac, 0x49, 0x32, 0xac, 0xb6, 0xa8, 0xf0, 0x49, 0xc8, 0xa6, 0x28, 0xa9, 0x47, 0x4a, 0x6f, 0x9f, 0x7a, 0xa9, 0x77, 0x4b, 0x72, 0x97, 0xf5, 0xa9, 0xbf, 0x4c, 0x86, 0x90, 0x65, 0xa9, 0xf7, 0x4e, 0x8e, 0x86, 0xc1, 0xaa, 0x4a, 0x51, 0x1b, 0x7c, 0xef, 0xaa, 0xdb, 0x55, 0x25, 0x72, 0xf3, 0xab, 0xb4, 0x59, 0xb6, 0x69, 0x6b, 0xad, 0x0c, 0x5d, 0x9d, 0x60, 0xc6, 0xae, 0x71, 0x61, 0xd5, 0x58, 0xde, 0xb0, 0x7f, 0x66, 0x39, 0x50, 0x69, 0xab, 0x0f, 0x43, 0x04, 0xce, 0x0a, 0xac, 0x9f, 0x43, 0xb1, 0xc6, 0xff, 0xae, 0x16, 0x44, 0x46, 0xc0, 0x5b, 0xae, 0xcf, 0x44, 0xa0, 0xba, 0xe5, 0xaf, 0x8b, 0x44, 0xff, 0xb5, 0x77, 0xb0, 0x54, 0x45, 0x6a, 0xb0, 0x01, 0xb0, 0x6b, 0x45, 0xeb, 0xa9, 0xc0, 0xb0, 0x90, 0x46, 0x72, 0xa3, 0x72, 0xb0, 0xc6, 0x47, 0x26, 0x9c, 0xbe, 0xb1, 0x0b, 0x48, 0x11, 0x95, 0x90, 0xb1, 0x5a, 0x49, 0x3f, 0x8d, 0xe8, 0xb1, 0x9e, 0x4b, 0x37, 0x84, 0x7d, 0xb2, 0x16, 0x4d, 0xfe, 0x7a, 0xa2, 0xb2, 0xc1, 0x51, 0x31, 0x70, 0x84, 0xb3, 0x9f, 0x56, 0x84, 0x67, 0x08, 0xb5, 0x07, 0x5a, 0xb2, 0x5e, 0x3e, 0xb7, 0x07, 0x5f, 0x4a, 0x54, 0xaf, 0xb2, 0xc1, 0x40, 0x1a, 0xce, 0x71, 0xb4, 0x0c, 0x40, 0xa1, 0xc8, 0x5a, 0xb5, 0x7d, 0x41, 0x54, 0xc2, 0x7c, 0xb6, 0x74, 0x41, 0xc5, 0xbd, 0x1a, 0xb7, 0x1b, 0x42, 0x0b, 0xb7, 0xf2, 0xb7, 0xc4, 0x42, 0x56, 0xb2, 0xbf, 0xb8, 0x26, 0x42, 0xad, 0xad, 0x25, 0xb8, 0x3e, 0x43, 0x15, 0xa7, 0x19, 0xb8, 
0x5f, 0x43, 0x82, 0xa1, 0x06, 0xb8, 0x85, 0x44, 0x27, 0x9a, 0x47, 0xb8, 0xb5, 0x44, 0xde, 0x93, 0x5b, 0xb8, 0xed, 0x46, 0x43, 0x8b, 0x5f, 0xb9, 0x29, 0x48, 0x45, 0x82, 0x50, 0xb9, 0xa0, 0x4b, 0x4a, 0x78, 0x98, 0xba, 0x4b, 0x4e, 0xa0, 0x6e, 0x7a, 0xbb, 0x33, 0x53, 0x9b, 0x64, 0x9c, 0xbc, 0xf4, 0x58, 0x04, 0x5a, 0xf5, 0xba, 0xe1, 0x3d, 0x58, 0xcf, 0x7d, 0xbb, 0xe7, 0x3e, 0x07, 0xc9, 0xeb, 0xbd, 0x2b, 0x3e, 0xb8, 0xc4, 0x90, 0xbe, 0x5f, 0x3f, 0x5c, 0xbf, 0x68, 0xbe, 0xe6, 0x3f, 0x7c, 0xba, 0x5b, 0xbf, 0x6e, 0x3f, 0xa1, 0xb5, 0x47, 0xbf, 0xf4, 0x3f, 0xc7, 0xb0, 0x31, 0xc0, 0x25, 0x40, 0x01, 0xaa, 0x68, 0xc0, 0x5c, 0x40, 0x3e, 0xa4, 0x97, 0xc0, 0x6f, 0x40, 0x9e, 0x9e, 0x8c, 0xc0, 0x83, 0x41, 0x16, 0x97, 0xf1, 0xc0, 0x8b, 0x41, 0xa4, 0x91, 0x45, 0xc0, 0xe1, 0x43, 0x41, 0x89, 0x1b, 0xc1, 0x25, 0x45, 0x36, 0x80, 0x6e, 0xc2, 0x2d, 0x47, 0xb3, 0x77, 0x9e, 0xc2, 0x00, 0x4b, 0xf9, 0x6c, 0xae, 0xc2, 0xc0, 0x50, 0xc9, 0x61, 0xd7, 0x2a, 0x65, 0xa6, 0xe7, 0x9a, 0x77, 0x2c, 0x7c, 0xa8, 0x07, 0x91, 0xee, 0x2d, 0xbe, 0xa9, 0xac, 0x89, 0xc5, 0x2e, 0xb0, 0xab, 0x74, 0x81, 0xab, 0x2f, 0x5e, 0xad, 0xd1, 0x79, 0x67, 0x2f, 0xd9, 0xb0, 0x3d, 0x71, 0x27, 0x30, 0x87, 0xb2, 0x9d, 0x68, 0x85, 0x30, 0x64, 0xb5, 0x72, 0x5f, 0xd8, 0x31, 0x30, 0xb7, 0xab, 0x57, 0x84, 0x32, 0x2b, 0xb9, 0x88, 0x4f, 0x88, 0x34, 0x0a, 0xbb, 0x4b, 0x49, 0xcf, 0x37, 0x88, 0xbb, 0x9d, 0x43, 0x9e, 0x38, 0xc9, 0xbd, 0x5b, 0x3e, 0x9a, 0x3d, 0x28, 0xbd, 0x06, 0x3a, 0xdd, 0x40, 0x56, 0xbd, 0x5d, 0x37, 0x50, 0x43, 0x1b, 0xbd, 0xc2, 0x33, 0xe0, 0x45, 0xc7, 0xbe, 0x16, 0x30, 0x52, 0x31, 0x2e, 0xa0, 0x4c, 0x9e, 0x03, 0x33, 0x9f, 0xa1, 0x3a, 0x95, 0x24, 0x35, 0xae, 0xa2, 0x6a, 0x8c, 0xae, 0x37, 0x22, 0xa4, 0x11, 0x84, 0x77, 0x38, 0x3c, 0xa6, 0x2c, 0x7c, 0x3a, 0x39, 0x06, 0xa8, 0xb2, 0x74, 0x01, 0x39, 0xea, 0xab, 0x33, 0x6b, 0x3b, 0x3a, 0xd3, 0xad, 0xb0, 0x61, 0xf0, 0x3b, 0xf4, 0xb0, 0x0b, 0x59, 0x00, 0x3c, 0xc6, 0xb1, 0xf7, 0x50, 0x53, 0x3e, 0xfa, 0xb3, 0xaa, 0x4a, 0x39, 0x41, 0x1b, 0xb5, 0x19, 0x44, 0x06, 0x43, 0x82, 0xb6, 0x34, 
0x3f, 0x04, 0x46, 0x45, 0xb7, 0x12, 0x3b, 0xb7, 0x48, 0xe4, 0xb7, 0xd5, 0x38, 0x54, 0x4b, 0x5c, 0xb8, 0x7e, 0x34, 0xe4, 0x4d, 0xb8, 0xb9, 0x11, 0x31, 0x55, 0x37, 0x35, 0x99, 0xce, 0xa2, 0xb5, 0x3a, 0x86, 0x9a, 0x6f, 0x98, 0xba, 0x3d, 0x68, 0x9b, 0x0f, 0x90, 0x09, 0x3f, 0x17, 0x9c, 0xdf, 0x87, 0x8c, 0x40, 0x96, 0x9e, 0xda, 0x7f, 0x0c, 0x41, 0x87, 0xa1, 0x53, 0x76, 0xcb, 0x42, 0x7a, 0xa3, 0xb8, 0x6e, 0x75, 0x43, 0xa0, 0xa6, 0x11, 0x64, 0xfd, 0x44, 0xee, 0xa8, 0x91, 0x5c, 0x24, 0x46, 0x45, 0xaa, 0xee, 0x53, 0x89, 0x48, 0x21, 0xad, 0x1f, 0x4c, 0x6a, 0x4a, 0x5d, 0xae, 0xfa, 0x46, 0x22, 0x4c, 0x88, 0xb0, 0x76, 0x3f, 0xf8, 0x4e, 0xf9, 0xb1, 0x98, 0x3c, 0xb5, 0x51, 0x47, 0xb2, 0x9f, 0x39, 0x5f, 0x53, 0x77, 0xb3, 0x99, 0x35, 0xfd, 0x55, 0xaf, 0xb4, 0x85, 0x32, 0x5b, 0x3d, 0xef, 0x93, 0x72, 0xa5, 0x90, 0x41, 0x5b, 0x93, 0xb0, 0x9c, 0x79, 0x44, 0xbc, 0x94, 0x05, 0x93, 0xd1, 0x47, 0x45, 0x95, 0x29, 0x8b, 0x3b, 0x49, 0x0c, 0x96, 0xfa, 0x82, 0xae, 0x4a, 0x66, 0x99, 0x3c, 0x7a, 0x33, 0x4b, 0x68, 0x9b, 0xad, 0x71, 0xe2, 0x4c, 0x6a, 0x9e, 0x38, 0x68, 0xb5, 0x4d, 0x78, 0xa0, 0x97, 0x5f, 0x3b, 0x4f, 0x5a, 0xa3, 0x77, 0x57, 0x71, 0x50, 0xfe, 0xa5, 0xf8, 0x4f, 0x94, 0x53, 0x14, 0xa8, 0x52, 0x49, 0x73, 0x55, 0x1f, 0xaa, 0x48, 0x43, 0x1c, 0x57, 0x38, 0xab, 0xf5, 0x3e, 0x53, 0x59, 0x62, 0xad, 0x6c, 0x3a, 0xcf, 0x5b, 0x7f, 0xae, 0xbe, 0x37, 0x4b, 0x5d, 0xcd, 0xb0, 0x16, 0x33, 0x41, 0x45, 0x66, 0x8c, 0x8f, 0xa9, 0xe1, 0x49, 0x48, 0x8c, 0x9b, 0xa0, 0xc6, 0x4c, 0xaf, 0x8c, 0xe5, 0x98, 0x2f, 0x4f, 0xf7, 0x8d, 0x45, 0x8f, 0xa0, 0x51, 0xd9, 0x8e, 0xe4, 0x86, 0xf1, 0x53, 0x7f, 0x90, 0xb3, 0x7e, 0x3c, 0x54, 0xd9, 0x92, 0xf8, 0x75, 0xab, 0x56, 0x08, 0x95, 0x7b, 0x6d, 0x32, 0x57, 0x30, 0x98, 0x33, 0x64, 0x5b, 0x58, 0x9c, 0x9b, 0x1f, 0x5b, 0xef, 0x5a, 0x54, 0x9e, 0x26, 0x53, 0xf2, 0x5c, 0x29, 0xa1, 0x04, 0x4c, 0xe3, 0x5e, 0x01, 0xa3, 0x8d, 0x46, 0xc5, 0x5f, 0xd6, 0xa5, 0xb1, 0x40, 0x8c, 0x61, 0xa8, 0xa7, 0x87, 0x3c, 0xbc, 0x63, 0x84, 0xa9, 0x30, 0x38, 0xfc, 0x65, 0x96, 0xaa, 0xd7, 0x34, 0xaf, 0x4e, 
0x5f, 0x85, 0x1a, 0xae, 0x5d, 0x52, 0x2f, 0x85, 0x28, 0xa5, 0xad, 0x55, 0xbe, 0x85, 0x46, 0x9d, 0x32, 0x58, 0xf1, 0x85, 0x8a, 0x94, 0xb3, 0x5b, 0x93, 0x86, 0x4c, 0x8c, 0x2d, 0x5d, 0x5f, 0x87, 0xbe, 0x83, 0x99, 0x5e, 0xb3, 0x8a, 0x11, 0x7a, 0xc9, 0x5f, 0xc6, 0x8c, 0xaa, 0x71, 0xe8, 0x60, 0xf4, 0x8f, 0x6b, 0x69, 0x4a, 0x62, 0x39, 0x92, 0x11, 0x60, 0xed, 0x63, 0xce, 0x95, 0x8e, 0x59, 0x25, 0x65, 0x6e, 0x98, 0xc0, 0x51, 0x48, 0x67, 0x0e, 0x9b, 0xee, 0x4a, 0xc3, 0x68, 0xd1, 0x9e, 0xaa, 0x44, 0x78, 0x6a, 0x8c, 0xa0, 0xf4, 0x3e, 0xfe, 0x6c, 0x2c, 0xa3, 0x09, 0x3a, 0xe8, 0x6d, 0xf1, 0xa5, 0x03, 0x36, 0x71, 0x58, 0x3f, 0x7d, 0x1d, 0xb3, 0x80, 0x5c, 0x01, 0x7d, 0x50, 0xaa, 0xbe, 0x5f, 0x96, 0x7d, 0x81, 0xa2, 0x85, 0x62, 0xac, 0x7d, 0xcf, 0x99, 0xfb, 0x65, 0x79, 0x7e, 0x3c, 0x91, 0x62, 0x67, 0x18, 0x7f, 0x6e, 0x88, 0xe8, 0x68, 0x85, 0x80, 0x80, 0x80, 0x80, 0x69, 0xc0, 0x83, 0xd1, 0x77, 0xb6, 0x6a, 0xf0, 0x86, 0xb1, 0x6f, 0x11, 0x6c, 0x20, 0x89, 0xbf, 0x66, 0xa0, 0x6d, 0x60, 0x8c, 0xad, 0x5e, 0x50, 0x6f, 0x11, 0x90, 0x11, 0x56, 0x89, 0x70, 0xa7, 0x93, 0x69, 0x4f, 0x13, 0x71, 0xf6, 0x96, 0xc7, 0x48, 0xe2, 0x73, 0x7b, 0x99, 0x98, 0x42, 0xbb, 0x75, 0x17, 0x9c, 0x20, 0x3d, 0x6c, 0x76, 0xd7, 0x9e, 0x85, 0x38, 0x78, 0x63, 0x7c, 0x73, 0xe4, 0xb8, 0xb1, 0x67, 0x57, 0x74, 0x54, 0xaf, 0xf4, 0x6a, 0xbd, 0x74, 0xb4, 0xa7, 0xb4, 0x6d, 0xe4, 0x75, 0x25, 0x9f, 0x64, 0x70, 0x07, 0x76, 0x39, 0x96, 0xf6, 0x71, 0xf6, 0x77, 0x5d, 0x8e, 0x8a, 0x72, 0xe9, 0x79, 0x0d, 0x86, 0x0f, 0x73, 0xd7, 0x7b, 0x0a, 0x7d, 0x7c, 0x74, 0xee, 0x7e, 0x07, 0x74, 0xb8, 0x76, 0x3f, 0x81, 0x26, 0x6c, 0x43, 0x77, 0x82, 0x84, 0x5c, 0x63, 0xfd, 0x78, 0xbd, 0x87, 0x97, 0x5c, 0x01, 0x7a, 0x1d, 0x8a, 0xe2, 0x54, 0x3b, 0x7b, 0x87, 0x8e, 0x4f, 0x4d, 0x0c, 0x7c, 0xe9, 0x91, 0xbb, 0x46, 0xe2, 0x7e, 0x56, 0x94, 0xa4, 0x40, 0xe1, 0x7f, 0xc3, 0x97, 0x76, 0x3b, 0x2d, 0x6f, 0x9b, 0x6a, 0xfd, 0xbe, 0x7a, 0x73, 0x24, 0x6b, 0xae, 0xb5, 0x7f, 0x76, 0x38, 0x6c, 0x4e, 0xad, 0x0d, 0x78, 0xd4, 0x6c, 0xf8, 0xa4, 0xdb, 0x7a, 0xee, 0x6d, 0xeb, 
0x9c, 0x87, 0x7c, 0x79, 0x6f, 0x3c, 0x94, 0x09, 0x7d, 0xbd, 0x71, 0x02, 0x8b, 0x8c, 0x7e, 0x95, 0x73, 0x13, 0x83, 0x36, 0x7f, 0x4c, 0x75, 0xc6, 0x7a, 0x7d, 0x80, 0x36, 0x78, 0xcf, 0x71, 0xb2, 0x81, 0x55, 0x7c, 0x0e, 0x69, 0x80, 0x82, 0xc1, 0x7f, 0x39, 0x61, 0x7a, 0x84, 0x04, 0x82, 0x8a, 0x59, 0xe8, 0x85, 0x57, 0x85, 0xbf, 0x52, 0x4b, 0x86, 0x65, 0x89, 0x74, 0x4b, 0x53, 0x87, 0x93, 0x8d, 0x0d, 0x44, 0xf4, 0x89, 0x0b, 0x90, 0x7b, 0x3e, 0x99, 0x7b, 0x50, 0x61, 0xa0, 0xc3, 0x98, 0x7e, 0xab, 0x62, 0xf7, 0xba, 0xc6, 0x81, 0xc7, 0x64, 0x2a, 0xb2, 0x71, 0x84, 0x12, 0x65, 0x34, 0xaa, 0x5e, 0x86, 0x0f, 0x66, 0x27, 0xa2, 0x4e, 0x87, 0x44, 0x67, 0x83, 0x9a, 0x21, 0x88, 0x58, 0x68, 0xf4, 0x91, 0xe7, 0x89, 0x13, 0x6b, 0x1c, 0x89, 0x2f, 0x89, 0xe2, 0x6d, 0x61, 0x80, 0x93, 0x8a, 0xaa, 0x70, 0x99, 0x77, 0x83, 0x8b, 0x66, 0x73, 0xb7, 0x6e, 0xda, 0x8c, 0x70, 0x76, 0xff, 0x66, 0xf0, 0x8d, 0xc6, 0x7a, 0x20, 0x5f, 0x2c, 0x8f, 0x3a, 0x7d, 0xa4, 0x57, 0xe8, 0x90, 0xc9, 0x81, 0x05, 0x50, 0x8c, 0x91, 0x4c, 0x85, 0x08, 0x49, 0x80, 0x92, 0x2c, 0x88, 0xe0, 0x42, 0x86, 0x87, 0x97, 0x59, 0xf9, 0xc8, 0xbb, 0x8a, 0x93, 0x5b, 0x53, 0xbe, 0xfb, 0x8c, 0xeb, 0x5c, 0x93, 0xb7, 0x25, 0x8f, 0x30, 0x5d, 0xcb, 0xaf, 0x77, 0x90, 0x95, 0x5e, 0xdc, 0xa7, 0x8e, 0x91, 0xff, 0x5f, 0xf1, 0x9f, 0x7c, 0x92, 0xc0, 0x61, 0x85, 0x97, 0xac, 0x93, 0x9d, 0x63, 0x0a, 0x8f, 0xc9, 0x93, 0xf6, 0x65, 0x91, 0x86, 0xb3, 0x94, 0x7e, 0x68, 0x30, 0x7d, 0xbe, 0x95, 0x2f, 0x6b, 0x77, 0x74, 0x86, 0x96, 0x49, 0x6e, 0xce, 0x6c, 0x2c, 0x97, 0x6f, 0x72, 0x15, 0x64, 0x6c, 0x98, 0xb7, 0x75, 0x5a, 0x5c, 0xdc, 0x9a, 0x39, 0x78, 0xd4, 0x55, 0x7c, 0x9b, 0xad, 0x7c, 0x8d, 0x4e, 0x23, 0x9c, 0x9d, 0x80, 0xea, 0x46, 0x8f, 0x93, 0x00, 0x53, 0x1f, 0xcc, 0x91, 0x95, 0x86, 0x54, 0x7e, 0xc2, 0xea, 0x97, 0x6c, 0x55, 0xc0, 0xbb, 0x48, 0x99, 0x3a, 0x57, 0x02, 0xb4, 0x01, 0x9a, 0xb1, 0x58, 0x2a, 0xac, 0xac, 0x9b, 0xbd, 0x59, 0x3e, 0xa5, 0x35, 0x9c, 0xa9, 0x5a, 0x70, 0x9d, 0x88, 0x9d, 0x42, 0x5b, 0xe4, 0x95, 0xa9, 0x9d, 0xec, 0x5d, 0xa3, 0x8d, 0x6a, 0x9e, 
0x6b, 0x60, 0x3f, 0x84, 0x1a, 0x9e, 0xc7, 0x63, 0x4a, 0x7a, 0xc9, 0x9f, 0x70, 0x66, 0x98, 0x71, 0x77, 0xa0, 0x86, 0x6a, 0x0d, 0x69, 0x7e, 0xa1, 0xe3, 0x6d, 0x4d, 0x61, 0xe4, 0xa3, 0xac, 0x70, 0xac, 0x5a, 0x72, 0xa5, 0x44, 0x74, 0x3c, 0x52, 0xc7, 0xa6, 0x75, 0x78, 0xa2, 0x4a, 0xc6, 0x9c, 0xb5, 0x4d, 0x48, 0xcf, 0xb6, 0x9f, 0x1d, 0x4e, 0x79, 0xc6, 0x53, 0xa1, 0x04, 0x4f, 0x9f, 0xbe, 0x77, 0xa2, 0x62, 0x50, 0xbf, 0xb7, 0xbc, 0xa3, 0xd9, 0x52, 0x10, 0xb0, 0xe9, 0xa4, 0xb5, 0x53, 0x23, 0xa9, 0xe5, 0xa5, 0x88, 0x54, 0x2d, 0xa2, 0xc3, 0xa6, 0x15, 0x55, 0x6b, 0x9b, 0x3d, 0xa6, 0x89, 0x56, 0xcc, 0x93, 0x83, 0xa6, 0xf1, 0x58, 0xb8, 0x8a, 0xe8, 0xa7, 0x63, 0x5b, 0x1d, 0x81, 0xad, 0xa8, 0x1a, 0x5e, 0x9f, 0x77, 0xd6, 0xa8, 0xe5, 0x62, 0x2c, 0x6e, 0x8e, 0xaa, 0x03, 0x65, 0xc8, 0x66, 0xb6, 0xab, 0x74, 0x69, 0x3b, 0x5f, 0x0c, 0xad, 0xad, 0x6c, 0xf9, 0x57, 0x62, 0xb0, 0x9a, 0x70, 0x89, 0x4f, 0x43, 0xa5, 0x67, 0x49, 0xef, 0xd0, 0xc3, 0xa7, 0x9c, 0x4b, 0x06, 0xc8, 0x35, 0xa9, 0x89, 0x4b, 0xdc, 0xc0, 0xe2, 0xaa, 0xb3, 0x4c, 0x8f, 0xba, 0x96, 0xab, 0xda, 0x4d, 0x51, 0xb4, 0x52, 0xac, 0xdd, 0x4e, 0x19, 0xad, 0xd2, 0xad, 0x7a, 0x4e, 0xd6, 0xa7, 0x05, 0xae, 0x32, 0x4f, 0x9e, 0xa0, 0x25, 0xae, 0xa4, 0x50, 0xd4, 0x98, 0xbc, 0xaf, 0x19, 0x52, 0x2a, 0x91, 0x53, 0xaf, 0x65, 0x54, 0x3e, 0x88, 0x55, 0xaf, 0xd4, 0x56, 0x9a, 0x7f, 0x22, 0xb0, 0x96, 0x5a, 0x58, 0x75, 0x5e, 0xb1, 0xb5, 0x5e, 0x35, 0x6c, 0x11, 0xb3, 0x0e, 0x61, 0xdd, 0x63, 0xd2, 0xb4, 0x7b, 0x65, 0xc7, 0x5b, 0xe9, 0xb6, 0x8f, 0x69, 0xf8, 0x53, 0x58, 0xad, 0xa8, 0x47, 0x1f, 0xd1, 0x2a, 0xaf, 0x99, 0x47, 0xee, 0xc9, 0x83, 0xb1, 0x46, 0x48, 0x89, 0xc2, 0xe9, 0xb2, 0x89, 0x49, 0x21, 0xbc, 0xdf, 0xb3, 0x7b, 0x49, 0xb4, 0xb7, 0x20, 0xb4, 0x73, 0x4a, 0x4e, 0xb1, 0x4d, 0xb4, 0xfb, 0x4a, 0xec, 0xaa, 0xfd, 0xb5, 0x71, 0x4b, 0x90, 0xa4, 0x83, 0xb5, 0xee, 0x4c, 0x4d, 0x9d, 0xcb, 0xb6, 0x5f, 0x4d, 0x3d, 0x96, 0x95, 0xb6, 0xdc, 0x4e, 0x4d, 0x8f, 0x2a, 0xb7, 0x1f, 0x50, 0x4a, 0x85, 0xed, 0xb7, 0x84, 0x52, 0xff, 0x7c, 0xad, 0xb8, 0x63, 0x56, 0x8c, 
0x73, 0x14, 0xb9, 0x93, 0x5a, 0xbf, 0x69, 0xc9, 0xbb, 0x36, 0x5e, 0x7b, 0x60, 0xf0, 0xbd, 0x43, 0x62, 0xfe, 0x57, 0x88, 0xb5, 0xb8, 0x44, 0x17, 0xd0, 0xe6, 0xb7, 0x50, 0x44, 0xd3, 0xca, 0xc1, 0xb8, 0xe2, 0x45, 0x8a, 0xc4, 0xe0, 0xba, 0x5a, 0x46, 0x40, 0xbf, 0x2c, 0xbb, 0x1b, 0x46, 0xa6, 0xb9, 0xb4, 0xbb, 0xe2, 0x47, 0x12, 0xb4, 0x32, 0xbc, 0x90, 0x47, 0x83, 0xae, 0x82, 0xbc, 0xec, 0x48, 0x07, 0xa8, 0x50, 0xbd, 0x54, 0x48, 0x90, 0xa2, 0x19, 0xbd, 0xb0, 0x49, 0x39, 0x9b, 0x64, 0xbe, 0x0d, 0x49, 0xf8, 0x94, 0x68, 0xbe, 0x64, 0x4b, 0x32, 0x8c, 0xb5, 0xbe, 0x9c, 0x4d, 0x36, 0x83, 0xc9, 0xbf, 0x1b, 0x4f, 0xc5, 0x7a, 0x5a, 0xc0, 0x09, 0x52, 0xfa, 0x70, 0xcc, 0xc1, 0x45, 0x57, 0x9c, 0x67, 0x5c, 0xc3, 0x10, 0x5b, 0xcc, 0x5d, 0x8b, 0xbc, 0xfd, 0x40, 0x8b, 0xd1, 0x59, 0xbe, 0xb4, 0x41, 0x88, 0xcb, 0xc6, 0xc0, 0x53, 0x42, 0x6b, 0xc6, 0x90, 0xc2, 0x0d, 0x42, 0xfe, 0xc1, 0x99, 0xc3, 0x04, 0x43, 0x28, 0xbc, 0x7a, 0xc3, 0xaf, 0x43, 0x53, 0xb7, 0x36, 0xc4, 0x5a, 0x43, 0x84, 0xb1, 0xe8, 0xc4, 0xd7, 0x43, 0xd0, 0xac, 0x2f, 0xc5, 0x7c, 0x44, 0x07, 0xa6, 0x59, 0xc5, 0x97, 0x44, 0xa2, 0xa0, 0x30, 0xc5, 0xe9, 0x45, 0x39, 0x99, 0x6d, 0xc6, 0x3c, 0x45, 0xe2, 0x92, 0x91, 0xc6, 0xef, 0x47, 0x07, 0x8b, 0x00, 0xc7, 0x3d, 0x48, 0xf4, 0x82, 0x7c, 0xc8, 0x04, 0x4b, 0xad, 0x79, 0x82, 0xc8, 0x04, 0x4f, 0x9a, 0x6e, 0x9a, 0xc8, 0xe0, 0x54, 0xa2, 0x64, 0x07, 0x33, 0x1d, 0xac, 0xac, 0x9f, 0x79, 0x34, 0x89, 0xae, 0x09, 0x96, 0xf6, 0x35, 0xa3, 0xaf, 0x88, 0x8e, 0xa2, 0x36, 0x4b, 0xb1, 0x53, 0x86, 0x87, 0x36, 0x99, 0xb3, 0x3d, 0x7e, 0x6b, 0x36, 0xed, 0xb5, 0x62, 0x76, 0x58, 0x37, 0x2e, 0xb7, 0x79, 0x6e, 0x24, 0x37, 0x69, 0xb9, 0xd6, 0x65, 0x8b, 0x37, 0x24, 0xbc, 0x5c, 0x5d, 0x06, 0x37, 0xfe, 0xbd, 0xb2, 0x54, 0x84, 0x39, 0x42, 0xbf, 0x04, 0x4d, 0x46, 0x3b, 0xc6, 0xbf, 0xa8, 0x47, 0x13, 0x3d, 0xd8, 0xc0, 0x46, 0x40, 0x91, 0x40, 0x67, 0xc0, 0xd7, 0x3c, 0xbc, 0x43, 0xcf, 0xc0, 0xce, 0x39, 0x54, 0x46, 0x95, 0xc1, 0x01, 0x35, 0xd3, 0x49, 0x26, 0xc1, 0x38, 0x32, 0x2b, 0x38, 0xe6, 0xa6, 0x9b, 0xa2, 0xde, 0x3b, 
0x4f, 0xa7, 0x66, 0x9a, 0x0b, 0x3d, 0x52, 0xa8, 0x66, 0x91, 0x9d, 0x3e, 0x9e, 0xaa, 0x0c, 0x89, 0x51, 0x3f, 0x8b, 0xab, 0xdc, 0x81, 0x08, 0x40, 0x46, 0xae, 0x42, 0x78, 0xda, 0x40, 0xcf, 0xb0, 0xac, 0x70, 0xb9, 0x41, 0x51, 0xb2, 0xd3, 0x67, 0xaa, 0x41, 0xc2, 0xb4, 0xf2, 0x5e, 0xd5, 0x42, 0x5a, 0xb6, 0x96, 0x56, 0x1b, 0x43, 0x65, 0xb8, 0x09, 0x4e, 0x44, 0x45, 0x8c, 0xb9, 0x20, 0x48, 0x47, 0x47, 0x94, 0xb9, 0xf3, 0x41, 0xf4, 0x4a, 0x21, 0xba, 0x99, 0x3d, 0xe3, 0x4c, 0xba, 0xbb, 0x21, 0x3a, 0x6b, 0x4f, 0x2a, 0xbb, 0x9d, 0x36, 0xe3, 0x51, 0x71, 0xbc, 0x24, 0x33, 0x40, 0x3e, 0x1b, 0xa0, 0xd1, 0xa6, 0x63, 0x41, 0x38, 0xa1, 0x1b, 0x9d, 0x0b, 0x44, 0x05, 0xa1, 0xb6, 0x94, 0x7d, 0x46, 0x2a, 0xa2, 0xea, 0x8c, 0x0a, 0x47, 0x7d, 0xa4, 0xc5, 0x83, 0xa6, 0x48, 0x74, 0xa6, 0xec, 0x7b, 0x64, 0x49, 0x28, 0xa9, 0x5d, 0x73, 0x47, 0x49, 0xf8, 0xab, 0xbc, 0x6a, 0x73, 0x4a, 0xda, 0xad, 0xf9, 0x61, 0x3c, 0x4b, 0xee, 0xaf, 0xfc, 0x58, 0x86, 0x4c, 0xdf, 0xb1, 0x9b, 0x50, 0x23, 0x4e, 0xf9, 0xb3, 0x00, 0x4a, 0x27, 0x50, 0xf6, 0xb4, 0x1c, 0x43, 0xfe, 0x52, 0xf6, 0xb5, 0x14, 0x3e, 0xff, 0x55, 0x30, 0xb6, 0x00, 0x3b, 0x9a, 0x57, 0x54, 0xb6, 0xe0, 0x38, 0x1a, 0x59, 0x83, 0xb7, 0xa7, 0x34, 0x63, 0x44, 0x7d, 0x9a, 0x6d, 0xaa, 0x39, 0x48, 0x13, 0x9a, 0x67, 0xa0, 0x9c, 0x4b, 0x2d, 0x9a, 0xd2, 0x97, 0xfa, 0x4e, 0x26, 0x9b, 0x59, 0x8f, 0x6e, 0x4f, 0x97, 0x9d, 0x5b, 0x86, 0xd4, 0x50, 0xbc, 0x9f, 0x82, 0x7e, 0x4a, 0x51, 0xa5, 0xa1, 0xb6, 0x76, 0x3c, 0x52, 0x83, 0xa3, 0xf6, 0x6d, 0xf6, 0x53, 0x99, 0xa6, 0x11, 0x64, 0xb2, 0x54, 0xe5, 0xa8, 0x5d, 0x5c, 0x2e, 0x56, 0x37, 0xaa, 0xa1, 0x54, 0x02, 0x57, 0xd4, 0xac, 0x9d, 0x4c, 0xd7, 0x59, 0xd0, 0xae, 0x4b, 0x46, 0x95, 0x5b, 0xc3, 0xaf, 0xaf, 0x40, 0x4b, 0x5d, 0xc2, 0xb0, 0xe7, 0x3c, 0xcf, 0x5f, 0xa9, 0xb2, 0x01, 0x39, 0x59, 0x61, 0xb9, 0xb3, 0x11, 0x35, 0x61, 0x4b, 0x40, 0x93, 0xd1, 0xae, 0x07, 0x4f, 0x0f, 0x93, 0xce, 0xa4, 0xbb, 0x52, 0xac, 0x93, 0xd3, 0x9b, 0xf8, 0x55, 0xfc, 0x94, 0x0a, 0x93, 0x72, 0x58, 0x3f, 0x95, 0x26, 0x8a, 0xe8, 0x59, 0xaa, 0x96, 0xe6, 
0x82, 0x55, 0x5a, 0xc4, 0x99, 0x13, 0x79, 0xef, 0x5b, 0xa4, 0x9b, 0x6a, 0x71, 0xc3, 0x5c, 0xb8, 0x9d, 0xc4, 0x68, 0xe6, 0x5d, 0xe2, 0x9f, 0xf0, 0x5f, 0xaf, 0x5f, 0x9b, 0xa2, 0xc8, 0x58, 0x1a, 0x61, 0x03, 0xa5, 0x42, 0x50, 0x4d, 0x62, 0xbf, 0xa7, 0x7f, 0x4a, 0x2b, 0x64, 0x7b, 0xa9, 0x58, 0x44, 0x0a, 0x66, 0x3a, 0xaa, 0xf1, 0x3e, 0xd2, 0x68, 0x0e, 0xac, 0x7a, 0x3a, 0xf4, 0x6a, 0x10, 0xad, 0xfb, 0x36, 0x96, 0x53, 0x37, 0x8c, 0xcf, 0xb2, 0x20, 0x57, 0x44, 0x8c, 0x94, 0xa9, 0x33, 0x5b, 0x16, 0x8c, 0x65, 0xa0, 0x9d, 0x5e, 0x67, 0x8c, 0x92, 0x98, 0x12, 0x61, 0x75, 0x8c, 0xe1, 0x8f, 0x83, 0x62, 0xd3, 0x8e, 0x8d, 0x86, 0xc9, 0x64, 0x10, 0x90, 0x71, 0x7e, 0x0c, 0x65, 0x39, 0x92, 0xaf, 0x75, 0xbd, 0x66, 0x52, 0x95, 0x08, 0x6d, 0x83, 0x67, 0x87, 0x97, 0x85, 0x64, 0xff, 0x68, 0xdc, 0x9a, 0x41, 0x5c, 0xa9, 0x6a, 0x75, 0x9d, 0x43, 0x54, 0xc1, 0x6c, 0x11, 0xa0, 0x0a, 0x4d, 0x85, 0x6d, 0xa5, 0xa2, 0x61, 0x47, 0xb5, 0x6f, 0x37, 0xa4, 0x5a, 0x41, 0xc9, 0x70, 0xcc, 0xa6, 0x40, 0x3d, 0x27, 0x72, 0x91, 0xa8, 0x2e, 0x38, 0x74, 0x5c, 0xaf, 0x85, 0x0d, 0xb6, 0xf5, 0x60, 0xa3, 0x85, 0x01, 0xad, 0xf7, 0x64, 0x59, 0x84, 0xdd, 0xa5, 0x92, 0x67, 0xbf, 0x84, 0xe0, 0x9d, 0x17, 0x6a, 0x94, 0x85, 0x41, 0x94, 0x8f, 0x6c, 0x99, 0x86, 0x30, 0x8c, 0x11, 0x6d, 0xcd, 0x87, 0xb7, 0x83, 0x86, 0x6e, 0xfb, 0x89, 0xfc, 0x7a, 0xf1, 0x70, 0x20, 0x8c, 0x62, 0x72, 0x74, 0x71, 0x49, 0x8e, 0xf4, 0x6a, 0x03, 0x72, 0x78, 0x91, 0x7c, 0x61, 0xa9, 0x73, 0xfb, 0x94, 0xa9, 0x59, 0xe6, 0x75, 0x81, 0x97, 0xb6, 0x52, 0x24, 0x76, 0xf1, 0x9a, 0xaa, 0x4b, 0xa9, 0x78, 0x85, 0x9d, 0x32, 0x45, 0xa5, 0x7a, 0x3c, 0x9f, 0x5a, 0x3f, 0xc0, 0x7b, 0xcf, 0xa1, 0xaa, 0x3a, 0xa3, 0x66, 0xe1, 0x7c, 0xb4, 0xbb, 0xde, 0x6a, 0xdc, 0x7c, 0xc0, 0xb2, 0xf9, 0x6e, 0x7d, 0x7c, 0xc7, 0xaa, 0x84, 0x71, 0xce, 0x7c, 0xe7, 0xa2, 0x12, 0x74, 0x68, 0x7d, 0x7c, 0x99, 0xa9, 0x76, 0xc3, 0x7e, 0x2c, 0x91, 0x45, 0x77, 0xd4, 0x7f, 0x6a, 0x88, 0xdd, 0x78, 0xb9, 0x80, 0x80, 0x80, 0x80, 0x7a, 0x0e, 0x83, 0xb4, 0x77, 0xeb, 0x7b, 0x55, 0x86, 0x74, 0x6f, 0x76, 0x7c, 
0x9f, 0x89, 0x63, 0x67, 0x3c, 0x7d, 0xef, 0x8c, 0x15, 0x5f, 0x0e, 0x7f, 0x57, 0x8f, 0x2f, 0x57, 0x39, 0x80, 0xab, 0x92, 0x35, 0x4f, 0xa1, 0x81, 0xec, 0x95, 0x6c, 0x49, 0xb3, 0x83, 0x4e, 0x98, 0x33, 0x43, 0xc8, 0x84, 0xda, 0x9a, 0xe1, 0x3d, 0xb0, 0x72, 0x72, 0x73, 0x7e, 0xc0, 0xf9, 0x76, 0x23, 0x73, 0xd4, 0xb8, 0x33, 0x79, 0x9a, 0x74, 0x0e, 0xaf, 0xba, 0x7c, 0x87, 0x74, 0x89, 0xa7, 0x65, 0x7f, 0x45, 0x75, 0x13, 0x9f, 0x07, 0x81, 0x14, 0x76, 0x31, 0x96, 0xab, 0x82, 0xaa, 0x77, 0x68, 0x8e, 0x52, 0x83, 0x64, 0x79, 0x27, 0x86, 0x00, 0x84, 0x25, 0x7b, 0x21, 0x7d, 0x90, 0x85, 0x3c, 0x7e, 0x0e, 0x74, 0xee, 0x86, 0x88, 0x81, 0x06, 0x6c, 0xab, 0x87, 0xdb, 0x83, 0xfa, 0x64, 0xb3, 0x89, 0x2a, 0x86, 0xe5, 0x5c, 0xe0, 0x8a, 0xa2, 0x89, 0xf5, 0x55, 0x37, 0x8c, 0x16, 0x8d, 0x17, 0x4d, 0xe4, 0x8d, 0x23, 0x90, 0x8c, 0x47, 0xa7, 0x8e, 0x19, 0x93, 0xf7, 0x41, 0x3f, 0x7d, 0xfc, 0x6a, 0x6b, 0xc6, 0x07, 0x81, 0x80, 0x6b, 0x45, 0xbd, 0x3d, 0x84, 0xab, 0x6b, 0xde, 0xb4, 0xe5, 0x87, 0x6b, 0x6c, 0x7f, 0xac, 0xa0, 0x89, 0xb1, 0x6d, 0x33, 0xa4, 0x5a, 0x8b, 0x81, 0x6e, 0x2c, 0x9c, 0x0c, 0x8c, 0xf5, 0x6f, 0x62, 0x93, 0xc0, 0x8e, 0x1c, 0x71, 0x27, 0x8b, 0x6c, 0x8e, 0xe6, 0x73, 0x3f, 0x83, 0x34, 0x8f, 0x9c, 0x75, 0xde, 0x7a, 0xa8, 0x90, 0x83, 0x78, 0xcf, 0x72, 0x0d, 0x91, 0xb9, 0x7b, 0xd4, 0x6a, 0x19, 0x93, 0x3c, 0x7e, 0xb2, 0x62, 0x64, 0x94, 0x9b, 0x81, 0xed, 0x5a, 0xdc, 0x95, 0xfe, 0x85, 0x24, 0x53, 0x4e, 0x97, 0x17, 0x88, 0x91, 0x4c, 0x20, 0x98, 0x22, 0x8c, 0x2a, 0x45, 0x2d, 0x89, 0xd2, 0x61, 0x28, 0xca, 0xd1, 0x8d, 0x5a, 0x62, 0x6d, 0xc1, 0x4d, 0x90, 0x0e, 0x63, 0xad, 0xb9, 0x68, 0x92, 0x88, 0x64, 0xbe, 0xb1, 0xa7, 0x94, 0x58, 0x65, 0xb9, 0xa9, 0x9e, 0x95, 0xff, 0x66, 0xac, 0xa1, 0x85, 0x97, 0x1c, 0x68, 0x03, 0x99, 0x8a, 0x98, 0x2f, 0x69, 0x65, 0x91, 0x87, 0x98, 0xe3, 0x6b, 0x8c, 0x88, 0xf2, 0x99, 0xa8, 0x6d, 0xc1, 0x80, 0x7c, 0x9a, 0x93, 0x70, 0xce, 0x77, 0xa9, 0x9b, 0x81, 0x73, 0xbb, 0x6f, 0x38, 0x9c, 0xb4, 0x76, 0xcb, 0x67, 0x90, 0x9e, 0x30, 0x79, 0xaf, 0x5f, 0xed, 0x9f, 0xd3, 0x7d, 0x24, 
0x58, 0x81, 0xa1, 0x6a, 0x80, 0x9a, 0x51, 0x07, 0xa2, 0x4b, 0x84, 0x6d, 0x49, 0x5c, 0x95, 0x72, 0x59, 0xf1, 0xcf, 0x09, 0x98, 0x39, 0x5b, 0x09, 0xc5, 0xd6, 0x9a, 0xb5, 0x5c, 0x17, 0xbd, 0xb6, 0x9c, 0xd5, 0x5d, 0x35, 0xb6, 0x2c, 0x9e, 0xd6, 0x5e, 0x47, 0xae, 0x78, 0xa0, 0x1a, 0x5f, 0x55, 0xa6, 0xa6, 0xa1, 0x3e, 0x60, 0x80, 0x9e, 0xc5, 0xa1, 0xff, 0x62, 0x0e, 0x97, 0x15, 0xa2, 0xc5, 0x63, 0xa4, 0x8f, 0x3a, 0xa3, 0x34, 0x66, 0x1b, 0x86, 0x69, 0xa3, 0xcf, 0x68, 0xa9, 0x7d, 0xac, 0xa4, 0xbc, 0x6b, 0xd3, 0x74, 0xbb, 0xa6, 0x08, 0x6e, 0xf0, 0x6c, 0x98, 0xa7, 0x54, 0x72, 0x09, 0x65, 0x04, 0xa8, 0xc9, 0x75, 0x36, 0x5d, 0x5a, 0xaa, 0x65, 0x78, 0xc5, 0x55, 0x9f, 0xab, 0xed, 0x7c, 0x94, 0x4d, 0xa7, 0x9f, 0x73, 0x51, 0xe8, 0xd2, 0x24, 0xa1, 0xe6, 0x54, 0x17, 0xc9, 0x42, 0xa4, 0x45, 0x55, 0xb5, 0xc1, 0x10, 0xa5, 0xfb, 0x56, 0xf3, 0xb9, 0xe1, 0xa7, 0x9a, 0x58, 0x19, 0xb2, 0xbe, 0xa8, 0xd4, 0x59, 0x23, 0xab, 0x77, 0xa9, 0xdf, 0x5a, 0x25, 0xa4, 0x0e, 0xaa, 0xc2, 0x5b, 0x51, 0x9c, 0x7b, 0xab, 0x7a, 0x5c, 0xb2, 0x94, 0xc6, 0xac, 0x35, 0x5e, 0x6f, 0x8c, 0x86, 0xac, 0xe4, 0x60, 0xc3, 0x83, 0x9b, 0xad, 0x6f, 0x63, 0xd0, 0x7a, 0xa6, 0xae, 0x53, 0x67, 0x1f, 0x71, 0xbd, 0xaf, 0xa8, 0x6a, 0x7a, 0x69, 0xe5, 0xb1, 0x43, 0x6d, 0xa8, 0x62, 0x48, 0xb3, 0x4e, 0x71, 0x1e, 0x5a, 0x6a, 0xb4, 0xe3, 0x74, 0xf9, 0x51, 0xf8, 0xa8, 0x15, 0x4e, 0x25, 0xd3, 0x3b, 0xaa, 0x6d, 0x4f, 0x26, 0xcb, 0x08, 0xac, 0x96, 0x4f, 0xf8, 0xc3, 0xaa, 0xae, 0x6f, 0x51, 0x0f, 0xbc, 0xc5, 0xaf, 0xd8, 0x52, 0x4b, 0xb6, 0x23, 0xb1, 0x25, 0x53, 0x7a, 0xaf, 0x6e, 0xb1, 0xeb, 0x54, 0x7b, 0xa8, 0x7e, 0xb2, 0xc3, 0x55, 0x79, 0xa1, 0x7c, 0xb3, 0x64, 0x56, 0xb7, 0x9a, 0x24, 0xb3, 0xfe, 0x58, 0x09, 0x92, 0xae, 0xb4, 0x84, 0x59, 0xf5, 0x8a, 0x31, 0xb5, 0x1c, 0x5c, 0x2f, 0x81, 0x46, 0xb6, 0x22, 0x5f, 0x74, 0x77, 0xca, 0xb7, 0x29, 0x62, 0xd8, 0x6e, 0xf3, 0xb8, 0x86, 0x66, 0x64, 0x67, 0x02, 0xba, 0x3f, 0x69, 0xb9, 0x5f, 0x26, 0xbc, 0x4f, 0x6d, 0xd6, 0x56, 0x6e, 0xb0, 0x77, 0x4b, 0x36, 0xd3, 0xcc, 0xb2, 0xbc, 0x4c, 0x17, 0xcc, 0x29, 0xb4, 
0x9a, 0x4c, 0xc1, 0xc5, 0x88, 0xb6, 0x58, 0x4d, 0x72, 0xbf, 0x1b, 0xb7, 0x6b, 0x4e, 0x27, 0xb9, 0x07, 0xb8, 0x89, 0x4e, 0xe4, 0xb2, 0xde, 0xb9, 0x6a, 0x4f, 0xa0, 0xac, 0x76, 0xba, 0x22, 0x50, 0x68, 0xa5, 0xdd, 0xba, 0xe2, 0x51, 0x5a, 0x9f, 0x2a, 0xbb, 0x6e, 0x52, 0x83, 0x97, 0xeb, 0xbc, 0x03, 0x53, 0xb3, 0x90, 0xa3, 0xbc, 0x58, 0x55, 0xe5, 0x87, 0xe2, 0xbc, 0xd9, 0x58, 0x42, 0x7e, 0xf8, 0xbe, 0x06, 0x5b, 0x8e, 0x75, 0x8a, 0xbf, 0x87, 0x5f, 0x0a, 0x6c, 0x51, 0xc1, 0x0e, 0x62, 0xae, 0x63, 0xe1, 0xc3, 0x07, 0x67, 0x3f, 0x5a, 0x11, 0xb8, 0xb3, 0x47, 0xf9, 0xd3, 0xba, 0xba, 0xae, 0x48, 0xef, 0xcd, 0x1a, 0xbc, 0x5f, 0x49, 0xae, 0xc7, 0x27, 0xbe, 0x12, 0x4a, 0x73, 0xc1, 0x52, 0xbf, 0x1b, 0x4b, 0x02, 0xbb, 0x96, 0xbf, 0xfd, 0x4b, 0x88, 0xb5, 0xd5, 0xc0, 0xef, 0x4b, 0xf6, 0xb0, 0x1e, 0xc1, 0x8d, 0x4c, 0x7f, 0xa9, 0xdf, 0xc2, 0x34, 0x4d, 0x10, 0xa3, 0x93, 0xc2, 0xcf, 0x4d, 0xbf, 0x9c, 0xe7, 0xc3, 0x58, 0x4e, 0x95, 0x95, 0xbd, 0xc3, 0xe0, 0x4f, 0xb7, 0x8e, 0x31, 0xc4, 0x26, 0x51, 0xf9, 0x85, 0x8d, 0xc4, 0xbb, 0x54, 0x9f, 0x7c, 0xa3, 0xc5, 0xd5, 0x57, 0xea, 0x73, 0x2e, 0xc7, 0x2e, 0x5b, 0xd9, 0x69, 0xaf, 0xc9, 0x07, 0x5f, 0xeb, 0x5f, 0xcc, 0xc0, 0x6e, 0x44, 0x88, 0xd4, 0x0f, 0xc2, 0x74, 0x45, 0x90, 0xce, 0x0a, 0xc4, 0x36, 0x46, 0x1d, 0xc8, 0xe6, 0xc5, 0xe6, 0x46, 0x77, 0xc3, 0xe6, 0xc7, 0x58, 0x46, 0xad, 0xbe, 0xdf, 0xc8, 0x18, 0x46, 0xfc, 0xb9, 0x60, 0xc8, 0xdb, 0x47, 0x50, 0xb3, 0xd7, 0xc9, 0x8c, 0x47, 0xb7, 0xae, 0x1a, 0xca, 0x1c, 0x48, 0x3a, 0xa7, 0xfc, 0xca, 0xab, 0x48, 0xcd, 0xa1, 0xd1, 0xcb, 0x28, 0x49, 0x8c, 0x9b, 0x04, 0xcb, 0x8a, 0x4a, 0x78, 0x93, 0xd6, 0xcc, 0x09, 0x4b, 0xd7, 0x8c, 0x23, 0xcc, 0x7a, 0x4d, 0xd4, 0x83, 0xc5, 0xcd, 0x03, 0x50, 0xb0, 0x7a, 0x8a, 0xcd, 0xd1, 0x54, 0x2b, 0x70, 0xef, 0xce, 0xff, 0x58, 0xd2, 0x66, 0x43, 0x3b, 0x3b, 0xb3, 0x06, 0xa4, 0xd7, 0x3d, 0x01, 0xb3, 0xed, 0x9c, 0x2b, 0x3d, 0xba, 0xb5, 0x5a, 0x93, 0xf2, 0x3e, 0x22, 0xb6, 0xf8, 0x8b, 0xb9, 0x3e, 0x36, 0xb8, 0xb5, 0x83, 0x76, 0x3e, 0x41, 0xba, 0x98, 0x7b, 0x4a, 0x3e, 0x35, 0xbc, 0x85, 
0x73, 0x29, 0x3e, 0x0c, 0xbe, 0xc3, 0x6a, 0xcc, 0x3e, 0x1c, 0xc0, 0xe9, 0x62, 0xac, 0x3d, 0xff, 0xc2, 0x46, 0x5a, 0x04, 0x3d, 0x80, 0xc3, 0x64, 0x50, 0xd9, 0x3f, 0xa6, 0xc3, 0xbd, 0x4a, 0x88, 0x41, 0x08, 0xc4, 0x73, 0x44, 0x54, 0x44, 0x39, 0xc4, 0x24, 0x3f, 0x3d, 0x47, 0x23, 0xc4, 0x47, 0x3b, 0xb3, 0x49, 0xdd, 0xc4, 0x63, 0x38, 0x09, 0x4c, 0x8d, 0xc4, 0x63, 0x34, 0x38, 0x41, 0x00, 0xad, 0x10, 0xa7, 0xea, 0x43, 0xa4, 0xad, 0x64, 0x9e, 0xb4, 0x45, 0x10, 0xae, 0x8b, 0x96, 0x4b, 0x46, 0x34, 0xaf, 0xec, 0x8d, 0xe4, 0x46, 0xbc, 0xb1, 0xb2, 0x85, 0x95, 0x47, 0x21, 0xb3, 0x9c, 0x7d, 0x5c, 0x47, 0x64, 0xb5, 0xca, 0x75, 0x56, 0x47, 0xa8, 0xb7, 0xd7, 0x6d, 0x13, 0x48, 0x11, 0xb9, 0xec, 0x64, 0x95, 0x48, 0x7a, 0xbb, 0xaa, 0x5c, 0x2b, 0x48, 0xe5, 0xbc, 0xe7, 0x53, 0x8b, 0x4a, 0x53, 0xbd, 0xa9, 0x4c, 0x8f, 0x4c, 0x56, 0xbe, 0x0a, 0x46, 0x52, 0x4e, 0x47, 0xbe, 0x40, 0x40, 0x51, 0x50, 0xbc, 0xbe, 0xa1, 0x3c, 0xb6, 0x52, 0xfc, 0xbf, 0x18, 0x39, 0x18, 0x55, 0x3f, 0xbf, 0x7e, 0x35, 0x57, 0x46, 0x01, 0xa7, 0x35, 0xab, 0x64, 0x49, 0x2b, 0xa7, 0x41, 0xa1, 0xb9, 0x4b, 0x77, 0xa7, 0xf7, 0x99, 0x1d, 0x4d, 0x87, 0xa8, 0xd7, 0x90, 0xa4, 0x4e, 0x9b, 0xaa, 0xa9, 0x88, 0x23, 0x4f, 0x74, 0xac, 0x8b, 0x7f, 0x97, 0x4f, 0xea, 0xaf, 0x1f, 0x77, 0x7f, 0x50, 0x3e, 0xb1, 0x7c, 0x6f, 0x7d, 0x51, 0x0e, 0xb3, 0x45, 0x66, 0xca, 0x51, 0xc9, 0xb5, 0x05, 0x5e, 0x62, 0x52, 0x9b, 0xb6, 0x64, 0x56, 0x18, 0x53, 0xac, 0xb7, 0x88, 0x4e, 0x81, 0x55, 0x79, 0xb8, 0x51, 0x48, 0x8f, 0x57, 0x1d, 0xb8, 0xf6, 0x42, 0x5a, 0x59, 0x16, 0xb9, 0x9f, 0x3e, 0x15, 0x5b, 0x32, 0xba, 0x59, 0x3a, 0x72, 0x5d, 0x87, 0xbb, 0x01, 0x36, 0x49, 0x4b, 0x0a, 0xa1, 0x5e, 0xae, 0xe7, 0x4e, 0x9d, 0xa1, 0x4b, 0xa4, 0xe9, 0x51, 0xc9, 0xa1, 0x6b, 0x9b, 0xf8, 0x54, 0x95, 0xa1, 0xdf, 0x93, 0x8e, 0x56, 0x67, 0xa3, 0x2e, 0x8b, 0x22, 0x57, 0x78, 0xa5, 0x02, 0x82, 0xa6, 0x58, 0x4c, 0xa7, 0x2a, 0x7a, 0x7e, 0x58, 0xe8, 0xa9, 0x95, 0x72, 0x7e, 0x59, 0xc2, 0xab, 0xcb, 0x69, 0xde, 0x5a, 0xb3, 0xad, 0xd7, 0x61, 0x20, 0x5b, 0xb3, 0xaf, 0xc9, 0x58, 0xc7, 0x5c, 
0x97, 0xb1, 0x52, 0x50, 0xa5, 0x5e, 0x64, 0xb2, 0x8a, 0x4a, 0x9a, 0x60, 0x1e, 0xb3, 0x8b, 0x44, 0xa5, 0x61, 0xc1, 0xb4, 0x5d, 0x3f, 0x56, 0x63, 0x9a, 0xb5, 0x49, 0x3b, 0xb4, 0x65, 0xa1, 0xb6, 0x2b, 0x37, 0x7f, 0x51, 0x81, 0x9b, 0x08, 0xb2, 0x44, 0x55, 0x7c, 0x9a, 0x9f, 0xa8, 0xd3, 0x59, 0x14, 0x9a, 0x54, 0x9f, 0xca, 0x5c, 0x25, 0x9a, 0xad, 0x97, 0x53, 0x5e, 0xe9, 0x9b, 0x43, 0x8e, 0xdf, 0x5f, 0xea, 0x9d, 0x19, 0x86, 0x4d, 0x60, 0xbe, 0x9f, 0x19, 0x7d, 0xdf, 0x61, 0x94, 0xa1, 0x56, 0x75, 0xe6, 0x62, 0x7b, 0xa3, 0x8a, 0x6d, 0xbd, 0x63, 0xa6, 0xa5, 0x93, 0x64, 0xe1, 0x64, 0xd8, 0xa7, 0xcd, 0x5c, 0x9e, 0x66, 0x0a, 0xaa, 0x01, 0x54, 0xbd, 0x67, 0x5e, 0xab, 0xe4, 0x4d, 0x99, 0x69, 0x0c, 0xad, 0x6e, 0x47, 0x99, 0x6a, 0xb0, 0xae, 0xb5, 0x41, 0x82, 0x6c, 0x72, 0xb0, 0x05, 0x3d, 0x1c, 0x6e, 0x4c, 0xb1, 0x3f, 0x38, 0x9e, 0x59, 0x38, 0x94, 0x20, 0xb6, 0x45, 0x5d, 0x39, 0x93, 0xa8, 0xac, 0xf4, 0x60, 0xd1, 0x93, 0x5a, 0xa4, 0x33, 0x64, 0x25, 0x93, 0x4e, 0x9b, 0x9d, 0x67, 0x03, 0x93, 0xaa, 0x93, 0x1c, 0x68, 0xc8, 0x94, 0xf0, 0x8a, 0x91, 0x69, 0xf7, 0x96, 0xb1, 0x82, 0x05, 0x6a, 0xfb, 0x98, 0xc4, 0x79, 0xd5, 0x6b, 0xd7, 0x9a, 0xef, 0x71, 0xe0, 0x6c, 0xd7, 0x9d, 0x27, 0x69, 0x3e, 0x6d, 0xce, 0x9f, 0x39, 0x60, 0x26, 0x6f, 0x64, 0xa2, 0x02, 0x58, 0x95, 0x70, 0xc2, 0xa4, 0x6a, 0x51, 0x05, 0x72, 0x3e, 0xa6, 0x74, 0x4b, 0x1b, 0x73, 0xc2, 0xa8, 0x31, 0x45, 0x54, 0x75, 0x46, 0xa9, 0xc1, 0x3f, 0xaf, 0x77, 0x16, 0xab, 0xb7, 0x3a, 0x9d, 0x61, 0xa6, 0x8c, 0xc4, 0xba, 0xc3, 0x65, 0xf7, 0x8c, 0x3d, 0xb1, 0x74, 0x69, 0xb6, 0x8b, 0xe5, 0xa8, 0xdf, 0x6d, 0x4f, 0x8b, 0x9d, 0xa0, 0x64, 0x6f, 0xe8, 0x8c, 0x17, 0x97, 0xdc, 0x72, 0x1e, 0x8c, 0xc8, 0x8f, 0x4d, 0x73, 0x2f, 0x8e, 0x7e, 0x86, 0xa6, 0x74, 0x47, 0x90, 0x5b, 0x7e, 0x0f, 0x75, 0x77, 0x92, 0x5e, 0x75, 0xf8, 0x76, 0x8d, 0x94, 0x89, 0x6d, 0xec, 0x77, 0xa3, 0x96, 0xee, 0x65, 0x91, 0x78, 0xc2, 0x99, 0x74, 0x5d, 0x5b, 0x7a, 0x34, 0x9c, 0x51, 0x55, 0xa3, 0x7b, 0xb2, 0x9e, 0xee, 0x4e, 0x6d, 0x7d, 0x45, 0xa1, 0x1e, 0x48, 0xae, 0x7e, 0xcb, 0xa2, 0xff, 
0x43, 0x0d, 0x80, 0x72, 0xa5, 0x43, 0x3c, 0xeb, 0x6b, 0x5f, 0x84, 0xbe, 0xbf, 0x41, 0x6f, 0x76, 0x84, 0x77, 0xb6, 0x4d, 0x73, 0x4b, 0x84, 0x2f, 0xad, 0xb8, 0x76, 0x9f, 0x84, 0x21, 0xa5, 0x34, 0x79, 0x83, 0x84, 0x5b, 0x9c, 0xb8, 0x7b, 0xc7, 0x84, 0xfe, 0x94, 0x53, 0x7d, 0x4d, 0x86, 0x09, 0x8b, 0xed, 0x7e, 0x28, 0x87, 0x83, 0x83, 0x7e, 0x7f, 0x42, 0x89, 0xb3, 0x7b, 0x0d, 0x80, 0x71, 0x8c, 0x15, 0x72, 0xb0, 0x81, 0xaa, 0x8e, 0x8a, 0x6a, 0x75, 0x82, 0xe7, 0x90, 0xea, 0x62, 0x4f, 0x84, 0x1a, 0x93, 0xc6, 0x5a, 0xa3, 0x85, 0x71, 0x96, 0x9f, 0x53, 0x1e, 0x86, 0xb7, 0x99, 0x76, 0x4c, 0xa1, 0x88, 0x0c, 0x9c, 0x19, 0x46, 0xbc, 0x89, 0xc2, 0x9e, 0xbb, 0x3f, 0xfa, 0x75, 0xcb, 0x7c, 0x75, 0xc3, 0xf1, 0x79, 0xc8, 0x7c, 0x58, 0xbb, 0x47, 0x7d, 0x68, 0x7c, 0x42, 0xb2, 0xd8, 0x80, 0x9c, 0x7c, 0x62, 0xaa, 0x61, 0x83, 0x83, 0x7c, 0x9a, 0xa1, 0xd4, 0x85, 0x97, 0x7d, 0x4e, 0x99, 0x6c, 0x87, 0x6c, 0x7e, 0x1c, 0x91, 0x0c, 0x88, 0x32, 0x7f, 0x68, 0x88, 0xc0, 0x88, 0xdc, 0x80, 0x80, 0x80, 0x80, 0x8a, 0x2f, 0x83, 0x99, 0x78, 0x19, 0x8b, 0x62, 0x86, 0x42, 0x6f, 0xcc, 0x8c, 0xb0, 0x88, 0xee, 0x67, 0xd6, 0x8d, 0xfd, 0x8b, 0x54, 0x5f, 0xd2, 0x8f, 0xa7, 0x8e, 0x35, 0x58, 0x20, 0x91, 0x75, 0x90, 0xe9, 0x50, 0x89, 0x92, 0x13, 0x94, 0x51, 0x4a, 0x93, 0x93, 0x43, 0x97, 0xed, 0x43, 0x30, 0x81, 0x24, 0x73, 0x4f, 0xc8, 0xca, 0x84, 0xf2, 0x73, 0xb4, 0xc0, 0x1f, 0x88, 0x37, 0x73, 0xd1, 0xb7, 0xba, 0x8b, 0x51, 0x73, 0xf6, 0xaf, 0x54, 0x8d, 0xcf, 0x74, 0x7c, 0xa6, 0xf3, 0x90, 0x0c, 0x75, 0x1d, 0x9e, 0x91, 0x91, 0x88, 0x76, 0x3c, 0x96, 0x53, 0x92, 0xcc, 0x77, 0x81, 0x8e, 0x16, 0x93, 0x75, 0x79, 0x3e, 0x85, 0xe7, 0x94, 0x2e, 0x7b, 0x2f, 0x7d, 0xa4, 0x95, 0x4b, 0x7d, 0xff, 0x75, 0x39, 0x96, 0xa4, 0x80, 0xca, 0x6d, 0x2b, 0x98, 0x01, 0x83, 0x94, 0x65, 0x69, 0x99, 0x53, 0x86, 0x63, 0x5d, 0xad, 0x9a, 0xda, 0x89, 0x6e, 0x56, 0x31, 0x9c, 0x7e, 0x8c, 0x50, 0x4e, 0xdb, 0x9d, 0xbd, 0x8f, 0xba, 0x47, 0xd2, 0x8d, 0x0c, 0x6a, 0x32, 0xcd, 0x92, 0x90, 0x74, 0x6a, 0xf6, 0xc4, 0xa4, 0x93, 0x53, 0x6b, 0x89, 0xbc, 0x53, 0x95, 
0xf1, 0x6c, 0x13, 0xb4, 0x1f, 0x98, 0x27, 0x6c, 0xb4, 0xab, 0xe0, 0x99, 0xfd, 0x6d, 0x75, 0xa3, 0xaa, 0x9b, 0x77, 0x6e, 0x84, 0x9b, 0x81, 0x9c, 0xb6, 0x6f, 0xcc, 0x93, 0x54, 0x9d, 0xab, 0x71, 0x99, 0x8b, 0x2a, 0x9e, 0x5d, 0x73, 0x8f, 0x83, 0x12, 0x9f, 0x37, 0x76, 0x08, 0x7a, 0xbb, 0xa0, 0x57, 0x78, 0xcb, 0x72, 0x65, 0xa1, 0xa9, 0x7b, 0xa5, 0x6a, 0x97, 0xa3, 0x41, 0x7e, 0x6f, 0x62, 0xef, 0xa4, 0xac, 0x81, 0x9c, 0x5b, 0x56, 0xa5, 0xde, 0x84, 0xdf, 0x53, 0xcc, 0xa7, 0x3a, 0x88, 0x4f, 0x4c, 0x15, 0x98, 0x1d, 0x61, 0x06, 0xd1, 0xd6, 0x9b, 0x36, 0x62, 0x01, 0xc8, 0xfb, 0x9e, 0x2a, 0x62, 0xe0, 0xc0, 0xb7, 0xa0, 0x86, 0x63, 0xfc, 0xb8, 0xb8, 0xa2, 0xa5, 0x64, 0xff, 0xb0, 0xa1, 0xa4, 0x03, 0x66, 0x0c, 0xa8, 0xb9, 0xa5, 0x5f, 0x67, 0x0a, 0xa0, 0xd0, 0xa6, 0x4a, 0x68, 0x74, 0x98, 0xef, 0xa7, 0x32, 0x69, 0xdf, 0x90, 0xf8, 0xa7, 0xe4, 0x6c, 0x01, 0x88, 0x8e, 0xa8, 0xaa, 0x6e, 0x16, 0x80, 0x47, 0xa9, 0xca, 0x71, 0x0a, 0x77, 0xc0, 0xaa, 0xe0, 0x73, 0xdc, 0x6f, 0x8c, 0xac, 0x33, 0x76, 0xd7, 0x67, 0xf2, 0xad, 0xca, 0x79, 0xac, 0x60, 0x50, 0xaf, 0x54, 0x7d, 0x2b, 0x58, 0x89, 0xb0, 0xad, 0x80, 0xc9, 0x50, 0x6d, 0xa2, 0x22, 0x59, 0x6e, 0xd5, 0xba, 0xa5, 0x25, 0x5b, 0x27, 0xcc, 0x00, 0xa7, 0xb4, 0x5c, 0x27, 0xc3, 0xfa, 0xa9, 0xdf, 0x5d, 0x06, 0xbc, 0x5c, 0xab, 0x9f, 0x5d, 0xfa, 0xb4, 0xe6, 0xad, 0x2b, 0x5e, 0xe3, 0xad, 0x5a, 0xae, 0x7b, 0x5f, 0xdb, 0xa5, 0xaf, 0xaf, 0xa4, 0x61, 0x10, 0x9e, 0x02, 0xb0, 0x55, 0x62, 0x9b, 0x96, 0x53, 0xb0, 0xfe, 0x64, 0x49, 0x8e, 0x6e, 0xb1, 0x91, 0x66, 0x99, 0x85, 0xf2, 0xb2, 0x4c, 0x69, 0x16, 0x7d, 0x7f, 0xb3, 0x79, 0x6c, 0x2f, 0x74, 0xf1, 0xb5, 0x04, 0x6f, 0x2e, 0x6c, 0xf2, 0xb6, 0x79, 0x72, 0x3c, 0x65, 0x43, 0xb7, 0xfc, 0x75, 0x6c, 0x5d, 0x60, 0xb9, 0x9b, 0x79, 0x48, 0x54, 0xcc, 0xaa, 0xf9, 0x53, 0xf3, 0xd6, 0xf7, 0xad, 0xfa, 0x55, 0x91, 0xcd, 0xaa, 0xb0, 0x5a, 0x56, 0x7d, 0xc6, 0x52, 0xb2, 0x80, 0x57, 0x49, 0xbf, 0x3a, 0xb3, 0xe8, 0x58, 0x56, 0xb8, 0x5c, 0xb5, 0x4d, 0x59, 0x46, 0xb1, 0x72, 0xb6, 0x63, 0x5a, 0x38, 0xaa, 0x5a, 0xb7, 0x74, 0x5b, 0x2e, 
0xa3, 0x28, 0xb8, 0x4e, 0x5c, 0x56, 0x9b, 0xc7, 0xb8, 0xfe, 0x5d, 0xab, 0x94, 0x44, 0xb9, 0xad, 0x5f, 0x63, 0x8c, 0x26, 0xba, 0x47, 0x61, 0xab, 0x83, 0x72, 0xbb, 0x1e, 0x64, 0x8e, 0x7a, 0xc1, 0xbc, 0x62, 0x67, 0xa8, 0x72, 0x31, 0xbe, 0x05, 0x6a, 0xe0, 0x6a, 0x31, 0xc0, 0x06, 0x6d, 0xf3, 0x62, 0x60, 0xc1, 0xc0, 0x71, 0xfa, 0x59, 0x44, 0xb3, 0x50, 0x4f, 0x65, 0xd7, 0x82, 0xb6, 0x0f, 0x50, 0x53, 0xce, 0xd5, 0xb8, 0x2d, 0x51, 0x58, 0xc8, 0x1d, 0xba, 0x32, 0x52, 0x52, 0xc1, 0x8a, 0xbb, 0x9a, 0x53, 0x4c, 0xbb, 0x2b, 0xbc, 0xd4, 0x54, 0x3c, 0xb4, 0xcd, 0xbd, 0xf4, 0x55, 0x1d, 0xae, 0x53, 0xbe, 0xdc, 0x56, 0x06, 0xa7, 0x97, 0xbf, 0xce, 0x56, 0xee, 0xa0, 0xcd, 0xc0, 0x79, 0x58, 0x0e, 0x99, 0x91, 0xc1, 0x1b, 0x59, 0x45, 0x92, 0x3a, 0xc1, 0xa6, 0x5b, 0x33, 0x89, 0xe0, 0xc2, 0x46, 0x5d, 0x59, 0x81, 0x21, 0xc3, 0x92, 0x60, 0x65, 0x77, 0xea, 0xc4, 0xdb, 0x63, 0xaa, 0x6f, 0x3e, 0xc6, 0x5e, 0x67, 0x4a, 0x66, 0xde, 0xc8, 0xd1, 0x6b, 0xf9, 0x5b, 0xc6, 0xbc, 0x05, 0x4c, 0x35, 0xd6, 0xb6, 0xbe, 0x4c, 0x4d, 0x1b, 0xcf, 0x7f, 0xc0, 0x11, 0x4d, 0xd3, 0xc9, 0x72, 0xc1, 0xec, 0x4e, 0x5d, 0xc3, 0xb4, 0xc3, 0x6e, 0x4e, 0xd4, 0xbd, 0xf2, 0xc4, 0x78, 0x4f, 0x5a, 0xb8, 0x10, 0xc5, 0x83, 0x4f, 0xe5, 0xb2, 0x21, 0xc6, 0x66, 0x50, 0xa0, 0xab, 0xe0, 0xc7, 0x38, 0x51, 0x84, 0xa5, 0x68, 0xc8, 0x02, 0x52, 0x76, 0x9e, 0xbb, 0xc8, 0x96, 0x53, 0xa8, 0x97, 0x61, 0xc9, 0x2d, 0x54, 0xe8, 0x8f, 0xfd, 0xc9, 0xa4, 0x57, 0x20, 0x87, 0x8a, 0xca, 0x42, 0x59, 0x69, 0x7e, 0xfa, 0xcb, 0x76, 0x5c, 0xb4, 0x75, 0x86, 0xcc, 0xee, 0x60, 0x47, 0x6c, 0x22, 0xce, 0x95, 0x64, 0xa4, 0x62, 0x1b, 0xc4, 0x94, 0x48, 0xa8, 0xd7, 0x83, 0xc6, 0xc3, 0x49, 0xbe, 0xd0, 0xe5, 0xc8, 0x87, 0x4a, 0x2e, 0xcb, 0x87, 0xca, 0x27, 0x4a, 0x6f, 0xc6, 0x56, 0xcb, 0xc0, 0x4a, 0x86, 0xc1, 0x3d, 0xcc, 0xb4, 0x4a, 0xdf, 0xbb, 0xaa, 0xcd, 0x8e, 0x4b, 0x4c, 0xb5, 0xf0, 0xce, 0x5d, 0x4b, 0xc3, 0xb0, 0x28, 0xcf, 0x13, 0x4c, 0x6c, 0xa9, 0xe8, 0xcf, 0xc8, 0x4d, 0x26, 0xa3, 0x95, 0xd0, 0x6f, 0x4e, 0x0b, 0x9c, 0xcb, 0xd0, 0xd6, 0x4f, 0x3e, 0x95, 0x4c, 0xd1, 
0x59, 0x50, 0xc5, 0x8d, 0x9b, 0xd1, 0xf5, 0x52, 0xd3, 0x85, 0x8c, 0xd2, 0x9e, 0x55, 0x5f, 0x7c, 0xe9, 0xd3, 0xa7, 0x58, 0xc4, 0x73, 0x45, 0xd5, 0x80, 0x5c, 0xd9, 0x68, 0x96, 0x43, 0x5d, 0xb9, 0x17, 0xaa, 0x49, 0x44, 0xdb, 0xb9, 0xd8, 0xa1, 0x82, 0x45, 0x72, 0xbb, 0x3a, 0x99, 0x17, 0x45, 0xa4, 0xbc, 0xd6, 0x90, 0xb8, 0x45, 0xab, 0xbe, 0x6c, 0x88, 0x4e, 0x45, 0x88, 0xbf, 0xfe, 0x7f, 0xd8, 0x45, 0x5b, 0xc1, 0xae, 0x77, 0xea, 0x45, 0x01, 0xc3, 0x36, 0x70, 0x11, 0x44, 0xef, 0xc5, 0x09, 0x68, 0x2a, 0x45, 0x16, 0xc6, 0x87, 0x60, 0x5e, 0x44, 0x8b, 0xc7, 0x5e, 0x57, 0x98, 0x44, 0x73, 0xc7, 0xee, 0x4f, 0x23, 0x44, 0x8c, 0xc8, 0xf9, 0x48, 0xed, 0x48, 0x29, 0xc7, 0xf4, 0x43, 0x4e, 0x4a, 0xb2, 0xc7, 0xc1, 0x3e, 0x6d, 0x4d, 0x70, 0xc7, 0xb2, 0x3a, 0xa4, 0x4f, 0xf1, 0xc7, 0xb3, 0x36, 0xa0, 0x48, 0xb8, 0xb3, 0x42, 0xac, 0xd9, 0x4b, 0x01, 0xb3, 0xa1, 0xa3, 0xdb, 0x4c, 0x7d, 0xb4, 0x88, 0x9b, 0x4d, 0x4d, 0x4f, 0xb5, 0xe2, 0x92, 0xe2, 0x4d, 0xd1, 0xb7, 0x72, 0x8a, 0x6d, 0x4e, 0x2d, 0xb9, 0x06, 0x81, 0xe9, 0x4e, 0x58, 0xba, 0xfc, 0x79, 0xd1, 0x4e, 0x60, 0xbc, 0xef, 0x71, 0xcd, 0x4e, 0xb5, 0xbe, 0xe5, 0x69, 0xac, 0x4f, 0x03, 0xc0, 0xae, 0x61, 0xd2, 0x4f, 0x6d, 0xc1, 0x8e, 0x59, 0xcc, 0x4f, 0xdc, 0xc2, 0x36, 0x51, 0xcc, 0x51, 0x3c, 0xc2, 0x59, 0x4b, 0x54, 0x52, 0xb6, 0xc2, 0x5f, 0x45, 0x04, 0x54, 0x51, 0xc2, 0x6d, 0x3f, 0x8c, 0x56, 0x85, 0xc2, 0xbe, 0x3b, 0xca, 0x58, 0xb8, 0xc3, 0x05, 0x37, 0xd1, 0x4d, 0xb9, 0xad, 0xa4, 0xaf, 0xb7, 0x50, 0x8e, 0xad, 0xbc, 0xa6, 0x80, 0x52, 0xf1, 0xae, 0x0a, 0x9d, 0x9f, 0x54, 0x76, 0xaf, 0x1b, 0x95, 0x12, 0x55, 0x97, 0xb0, 0x75, 0x8c, 0x87, 0x56, 0x3f, 0xb2, 0x19, 0x84, 0x08, 0x56, 0xb0, 0xb4, 0x16, 0x7b, 0xe0, 0x56, 0xe2, 0xb6, 0x53, 0x73, 0xfe, 0x57, 0x43, 0xb8, 0x58, 0x6b, 0xec, 0x57, 0xd1, 0xba, 0x40, 0x63, 0xeb, 0x58, 0x66, 0xbb, 0xb6, 0x5b, 0xe9, 0x59, 0x04, 0xbc, 0xa3, 0x53, 0xa9, 0x5a, 0x2e, 0xbd, 0x29, 0x4c, 0xdf, 0x5b, 0xa7, 0xbd, 0x73, 0x46, 0xff, 0x5d, 0x03, 0xbd, 0xa3, 0x41, 0x1c, 0x5f, 0x06, 0xbe, 0x27, 0x3d, 0x09, 0x61, 0x2e, 0xbe, 0x76, 
0x38, 0xde, 0x52, 0xf1, 0xa7, 0xca, 0xb3, 0x1b, 0x56, 0x1d, 0xa7, 0x96, 0xa9, 0x94, 0x59, 0x06, 0xa7, 0x77, 0xa0, 0x86, 0x5b, 0x49, 0xa8, 0x29, 0x97, 0xfe, 0x5d, 0x5f, 0xa8, 0xfd, 0x8f, 0x78, 0x5e, 0x34, 0xaa, 0xc1, 0x86, 0xe9, 0x5e, 0xf4, 0xac, 0xa4, 0x7e, 0x6f, 0x5f, 0x70, 0xaf, 0x26, 0x76, 0x7a, 0x5f, 0xdd, 0xb1, 0x72, 0x6e, 0x8d, 0x60, 0xa6, 0xb3, 0x44, 0x66, 0x5c, 0x61, 0x59, 0xb5, 0x04, 0x5e, 0x6d, 0x62, 0x24, 0xb6, 0x33, 0x56, 0x70, 0x63, 0x0e, 0xb7, 0x2e, 0x4e, 0xec, 0x64, 0x7f, 0xb7, 0xca, 0x49, 0x1e, 0x65, 0xd3, 0xb8, 0x3c, 0x43, 0x37, 0x67, 0x5f, 0xb8, 0xc1, 0x3e, 0x52, 0x69, 0x52, 0xb9, 0x6a, 0x3a, 0x03, 0x58, 0xa4, 0xa1, 0xc9, 0xb6, 0xa8, 0x5c, 0x3a, 0xa1, 0x47, 0xac, 0xd8, 0x5f, 0x5d, 0xa1, 0x04, 0xa3, 0xb9, 0x62, 0x47, 0xa1, 0x28, 0x9b, 0x18, 0x64, 0xe8, 0xa1, 0xae, 0x92, 0xbb, 0x66, 0x58, 0xa3, 0x0e, 0x8a, 0x59, 0x67, 0x46, 0xa4, 0xc6, 0x81, 0xec, 0x68, 0x04, 0xa6, 0xe1, 0x79, 0xec, 0x68, 0x8e, 0xa9, 0x30, 0x72, 0x09, 0x69, 0x61, 0xab, 0x4d, 0x69, 0xaa, 0x6a, 0x43, 0xad, 0x50, 0x61, 0x56, 0x6b, 0x38, 0xaf, 0x2b, 0x59, 0x61, 0x6c, 0x0c, 0xb0, 0xb5, 0x51, 0x6c, 0x6d, 0x84, 0xb1, 0xce, 0x4b, 0x69, 0x6e, 0xf2, 0xb2, 0xa7, 0x45, 0xa5, 0x70, 0x48, 0xb3, 0x5b, 0x3f, 0xf4, 0x72, 0x30, 0xb4, 0x7f, 0x3b, 0x3b, 0x60, 0x1b, 0x9b, 0x0b, 0xba, 0x9c, 0x63, 0xd3, 0x9a, 0x5c, 0xb0, 0xda, 0x67, 0x24, 0x99, 0xf7, 0xa7, 0xf8, 0x6a, 0x58, 0x99, 0xab, 0x9f, 0x3b, 0x6c, 0xd1, 0x9a, 0x39, 0x96, 0xc5, 0x6e, 0xda, 0x9b, 0x1a, 0x8e, 0x4f, 0x6f, 0xee, 0x9c, 0xe5, 0x85, 0xd4, 0x70, 0xe5, 0x9e, 0xc7, 0x7d, 0x92, 0x71, 0xa4, 0xa0, 0xd9, 0x75, 0xcc, 0x72, 0x5d, 0xa2, 0xea, 0x6d, 0xcf, 0x73, 0x3f, 0xa4, 0xe6, 0x65, 0x22, 0x74, 0x46, 0xa7, 0x08, 0x5d, 0x23, 0x75, 0x7c, 0xa9, 0x27, 0x55, 0xab, 0x76, 0xb1, 0xaa, 0xf4, 0x4e, 0xa8, 0x78, 0x2b, 0xac, 0x74, 0x48, 0xec, 0x79, 0xa2, 0xad, 0xc6, 0x43, 0x25, 0x7b, 0x5d, 0xaf, 0x72, 0x3d, 0x48, 0x67, 0xb7, 0x93, 0xfd, 0xbe, 0x98, 0x6b, 0xc4, 0x93, 0x4d, 0xb5, 0x35, 0x6f, 0x86, 0x92, 0xc2, 0xac, 0x5c, 0x72, 0xe4, 0x92, 0x62, 0xa3, 0xc3, 0x75, 
0x8c, 0x92, 0x97, 0x9b, 0x2f, 0x77, 0xa8, 0x93, 0x47, 0x92, 0xaf, 0x79, 0x10, 0x94, 0xa4, 0x8a, 0x3b, 0x7a, 0x28, 0x96, 0x4a, 0x81, 0xd1, 0x7b, 0x27, 0x98, 0x44, 0x79, 0xcc, 0x7b, 0xf7, 0x9a, 0x61, 0x71, 0xfb, 0x7c, 0xc3, 0x9c, 0x86, 0x69, 0xa4, 0x7d, 0x66, 0x9e, 0x7d, 0x60, 0xf0, 0x7e, 0xc6, 0xa1, 0x18, 0x59, 0x6a, 0x80, 0x2b, 0xa3, 0x6e, 0x52, 0x30, 0x81, 0x94, 0xa5, 0x63, 0x4c, 0x36, 0x83, 0x03, 0xa7, 0x2b, 0x46, 0x86, 0x84, 0x97, 0xa8, 0xf8, 0x40, 0x29, 0x70, 0x99, 0x8c, 0x67, 0xc2, 0xd0, 0x74, 0xde, 0x8b, 0xab, 0xb9, 0xc4, 0x78, 0xbc, 0x8b, 0x24, 0xb1, 0x14, 0x7c, 0x0e, 0x8a, 0xe5, 0xa8, 0x7a, 0x7f, 0x29, 0x8a, 0xc0, 0x9f, 0xd7, 0x81, 0x18, 0x8b, 0x89, 0x97, 0x6b, 0x82, 0xc4, 0x8c, 0x76, 0x8f, 0x00, 0x83, 0x8e, 0x8e, 0x13, 0x86, 0x89, 0x84, 0x6a, 0x8f, 0xe2, 0x7e, 0x18, 0x85, 0x80, 0x91, 0xf0, 0x76, 0x17, 0x86, 0x77, 0x94, 0x18, 0x6e, 0x2f, 0x87, 0x7f, 0x96, 0x5a, 0x66, 0x1d, 0x88, 0x83, 0x98, 0x9f, 0x5e, 0x26, 0x89, 0xda, 0x9b, 0x4d, 0x56, 0xcf, 0x8b, 0x2d, 0x9d, 0xce, 0x4f, 0xac, 0x8c, 0x86, 0xa0, 0x41, 0x49, 0xcf, 0x8e, 0x0c, 0xa2, 0xa3, 0x43, 0x52, 0x7a, 0x9e, 0x84, 0x7b, 0xc7, 0x76, 0x7e, 0xc7, 0x83, 0xfc, 0xbe, 0x89, 0x82, 0x40, 0x83, 0xb0, 0xb6, 0x09, 0x85, 0x81, 0x83, 0x7a, 0xad, 0x75, 0x88, 0x56, 0x83, 0x90, 0xa4, 0xd4, 0x8a, 0xa4, 0x83, 0xf9, 0x9c, 0x55, 0x8c, 0x5d, 0x84, 0xbe, 0x93, 0xfb, 0x8d, 0x85, 0x85, 0xdd, 0x8b, 0xb3, 0x8e, 0x3e, 0x87, 0x52, 0x83, 0x75, 0x8f, 0x36, 0x89, 0x78, 0x7b, 0x32, 0x90, 0x36, 0x8b, 0xcf, 0x72, 0xff, 0x91, 0x57, 0x8e, 0x1a, 0x6a, 0xf3, 0x92, 0x91, 0x90, 0x4b, 0x62, 0xe5, 0x93, 0xf3, 0x92, 0xf3, 0x5b, 0x71, 0x95, 0x69, 0x95, 0xaa, 0x54, 0x44, 0x96, 0x95, 0x98, 0x70, 0x4d, 0x8c, 0x97, 0xd9, 0x9b, 0x89, 0x46, 0x7b, 0x84, 0xf9, 0x7c, 0x39, 0xcc, 0x23, 0x88, 0xe1, 0x7c, 0x03, 0xc3, 0x6d, 0x8c, 0x65, 0x7b, 0xcf, 0xba, 0xf3, 0x8f, 0xb5, 0x7b, 0x9e, 0xb2, 0x6b, 0x92, 0x52, 0x7b, 0xd4, 0xa9, 0xd4, 0x94, 0x8e, 0x7c, 0x42, 0xa1, 0x4f, 0x96, 0x10, 0x7d, 0x26, 0x98, 0xfa, 0x97, 0x67, 0x7e, 0x14, 0x90, 0xaa, 0x98, 0x09, 0x7f, 0x66, 
0x88, 0x8d, 0x98, 0xa1, 0x80, 0x80, 0x80, 0x80, 0x99, 0xfc, 0x83, 0x74, 0x78, 0x57, 0x9b, 0x32, 0x86, 0x06, 0x70, 0x44, 0x9c, 0x80, 0x88, 0xa6, 0x68, 0x73, 0x9d, 0xbb, 0x8b, 0x17, 0x60, 0x8a, 0x9f, 0x60, 0x8d, 0xe9, 0x59, 0x1f, 0xa1, 0x20, 0x90, 0x88, 0x51, 0xcf, 0xa2, 0x33, 0x93, 0xc7, 0x4a, 0xa9, 0x90, 0xa9, 0x73, 0x31, 0xd0, 0x7a, 0x94, 0x00, 0x73, 0x5b, 0xc7, 0xe3, 0x97, 0x23, 0x73, 0x77, 0xbf, 0x87, 0x99, 0xee, 0x73, 0x93, 0xb6, 0xf2, 0x9c, 0x7c, 0x73, 0xca, 0xae, 0x63, 0x9e, 0x67, 0x74, 0x73, 0xa6, 0x29, 0xa0, 0x13, 0x75, 0x46, 0x9d, 0xfa, 0xa1, 0x38, 0x76, 0x78, 0x95, 0xdb, 0xa2, 0x31, 0x77, 0xca, 0x8d, 0xc0, 0xa2, 0xd3, 0x79, 0x6c, 0x85, 0xbc, 0xa3, 0x98, 0x7b, 0x45, 0x7d, 0xa8, 0xa4, 0xd5, 0x7d, 0xf4, 0x75, 0x7c, 0xa6, 0x3a, 0x80, 0xa1, 0x6d, 0x8f, 0xa7, 0xa3, 0x83, 0x5e, 0x65, 0xe3, 0xa8, 0xeb, 0x86, 0x18, 0x5e, 0x33, 0xaa, 0x24, 0x89, 0x41, 0x56, 0xb0, 0xab, 0xa5, 0x8c, 0x6e, 0x4e, 0xdc, 0x9b, 0xd1, 0x69, 0xfe, 0xd4, 0xb5, 0x9e, 0xee, 0x6a, 0x7a, 0xcc, 0x0e, 0xa1, 0xc1, 0x6b, 0x0c, 0xc3, 0xc3, 0xa4, 0x38, 0x6b, 0xa2, 0xbb, 0x71, 0xa6, 0x65, 0x6c, 0x2c, 0xb3, 0x19, 0xa8, 0x1e, 0x6c, 0xdd, 0xaa, 0xff, 0xa9, 0x9a, 0x6d, 0xa4, 0xa3, 0x05, 0xaa, 0xbf, 0x6e, 0xc7, 0x9a, 0xf8, 0xab, 0xb4, 0x70, 0x20, 0x92, 0xce, 0xac, 0x86, 0x71, 0xee, 0x8a, 0xc9, 0xad, 0x45, 0x73, 0xc4, 0x82, 0xd9, 0xae, 0x42, 0x76, 0x31, 0x7a, 0xbd, 0xaf, 0x6f, 0x78, 0xea, 0x72, 0xa1, 0xb0, 0xda, 0x7b, 0xb1, 0x6a, 0xde, 0xb2, 0x94, 0x7e, 0x62, 0x63, 0x31, 0xb4, 0x08, 0x81, 0x80, 0x5b, 0x6c, 0xb5, 0x39, 0x84, 0xf8, 0x53, 0x43, 0xa5, 0xa1, 0x61, 0x30, 0xd8, 0x9c, 0xa8, 0xf2, 0x62, 0xc9, 0xce, 0xd0, 0xab, 0x97, 0x63, 0x67, 0xc6, 0xe1, 0xae, 0x13, 0x63, 0xd4, 0xbf, 0x26, 0xaf, 0xd7, 0x64, 0xb7, 0xb7, 0x61, 0xb1, 0x7a, 0x65, 0x7d, 0xaf, 0x9b, 0xb2, 0xc1, 0x66, 0x82, 0xa7, 0xe9, 0xb3, 0xff, 0x67, 0x79, 0xa0, 0x37, 0xb4, 0xb9, 0x68, 0xee, 0x98, 0x67, 0xb5, 0x78, 0x6a, 0x5a, 0x90, 0x87, 0xb6, 0x37, 0x6c, 0x6b, 0x88, 0x46, 0xb7, 0x0b, 0x6e, 0x65, 0x80, 0x2c, 0xb8, 0x60, 0x71, 0x3f, 0x77, 0xf3, 0xb9, 
0x8f, 0x73, 0xf7, 0x6f, 0xe6, 0xbb, 0x10, 0x76, 0xe9, 0x68, 0x24, 0xbc, 0xcd, 0x79, 0xbb, 0x60, 0x5a, 0xbe, 0x9c, 0x7d, 0x67, 0x57, 0xd8, 0xae, 0xf5, 0x5b, 0x70, 0xd9, 0xd8, 0xb2, 0x2c, 0x5c, 0xa2, 0xd0, 0x86, 0xb4, 0x7d, 0x5d, 0x19, 0xc9, 0x39, 0xb6, 0xb1, 0x5d, 0x7b, 0xc2, 0x06, 0xb8, 0x5a, 0x5e, 0x23, 0xba, 0xe0, 0xb9, 0xcc, 0x5e, 0xd6, 0xb3, 0xaf, 0xbb, 0x1f, 0x5f, 0xa0, 0xac, 0x69, 0xbc, 0x56, 0x60, 0xaa, 0xa5, 0x0f, 0xbd, 0x4d, 0x61, 0xef, 0x9d, 0xa8, 0xbd, 0xdf, 0x63, 0x7b, 0x96, 0x21, 0xbe, 0x78, 0x65, 0x1e, 0x8e, 0x5d, 0xbf, 0x20, 0x67, 0x52, 0x85, 0xea, 0xc0, 0x0d, 0x69, 0xaa, 0x7d, 0x96, 0xc1, 0x94, 0x6c, 0x95, 0x75, 0x40, 0xc3, 0x59, 0x6f, 0x89, 0x6d, 0x1c, 0xc4, 0xc4, 0x72, 0xbc, 0x64, 0xfd, 0xc6, 0x66, 0x76, 0x6d, 0x5b, 0xeb, 0xb7, 0x82, 0x56, 0x1f, 0xda, 0xc2, 0xba, 0x73, 0x57, 0x22, 0xd1, 0xf1, 0xbc, 0x9d, 0x57, 0xd5, 0xca, 0xfa, 0xbe, 0x9a, 0x58, 0x73, 0xc4, 0x45, 0xc0, 0x4c, 0x59, 0x0f, 0xbd, 0xad, 0xc1, 0x9a, 0x59, 0xb3, 0xb7, 0x24, 0xc2, 0xda, 0x5a, 0x4e, 0xb0, 0x88, 0xc3, 0xe4, 0x5b, 0x2f, 0xa9, 0xa8, 0xc4, 0xeb, 0x5c, 0x1c, 0xa2, 0xae, 0xc5, 0xb1, 0x5d, 0x48, 0x9b, 0x63, 0xc6, 0x55, 0x5e, 0xa3, 0x93, 0xe8, 0xc6, 0xfb, 0x60, 0x67, 0x8b, 0xe0, 0xc7, 0x92, 0x62, 0xa0, 0x83, 0x73, 0xc8, 0x9d, 0x65, 0x64, 0x7a, 0xdd, 0xca, 0x19, 0x68, 0x76, 0x72, 0x3f, 0xcb, 0xba, 0x6b, 0xe0, 0x69, 0xb3, 0xcd, 0xdf, 0x70, 0x14, 0x5f, 0x31, 0xbf, 0xdc, 0x51, 0x0f, 0xd9, 0xe6, 0xc2, 0x9f, 0x51, 0xfd, 0xd2, 0xf3, 0xc4, 0xd3, 0x52, 0xc5, 0xcc, 0xc1, 0xc6, 0xa9, 0x53, 0x53, 0xc6, 0xc5, 0xc8, 0x5c, 0x53, 0xbc, 0xc0, 0xc3, 0xc9, 0x83, 0x54, 0x5b, 0xba, 0xa2, 0xca, 0x91, 0x54, 0xfd, 0xb4, 0x6f, 0xcb, 0x88, 0x55, 0xb2, 0xae, 0x10, 0xcc, 0x60, 0x56, 0xa7, 0xa7, 0x60, 0xcd, 0x36, 0x57, 0xa2, 0xa0, 0x9b, 0xcd, 0xd2, 0x58, 0xee, 0x99, 0x2e, 0xce, 0x6d, 0x5a, 0x4e, 0x91, 0xaa, 0xcf, 0x08, 0x5c, 0x43, 0x89, 0x84, 0xcf, 0xb1, 0x5e, 0x53, 0x81, 0x33, 0xd0, 0xf0, 0x61, 0x75, 0x78, 0x0a, 0xd2, 0x69, 0x64, 0xb2, 0x6f, 0x18, 0xd4, 0x43, 0x68, 0xab, 0x65, 0x53, 0xc9, 0xa1, 0x4c, 0xea, 
0xdb, 0x1a, 0xcb, 0xc5, 0x4d, 0xdb, 0xd4, 0xb6, 0xcd, 0xaf, 0x4e, 0x8f, 0xce, 0xd4, 0xcf, 0x24, 0x4e, 0xc8, 0xc9, 0x44, 0xd0, 0xac, 0x4e, 0xd2, 0xc3, 0xc5, 0xd1, 0xd8, 0x4f, 0x1b, 0xbe, 0x1c, 0xd2, 0xcc, 0x4f, 0xa4, 0xb8, 0x34, 0xd3, 0xb3, 0x50, 0x3d, 0xb2, 0x39, 0xd4, 0x84, 0x51, 0x14, 0xab, 0xeb, 0xd5, 0x3f, 0x52, 0x04, 0xa5, 0x65, 0xd5, 0xf7, 0x52, 0xfc, 0x9e, 0xb0, 0xd6, 0x79, 0x54, 0x52, 0x97, 0x45, 0xd7, 0x09, 0x55, 0xb2, 0x8f, 0xd4, 0xd7, 0xaa, 0x57, 0xb9, 0x87, 0xa9, 0xd8, 0x5e, 0x59, 0xff, 0x7f, 0x3a, 0xd9, 0x8d, 0x5d, 0x5d, 0x75, 0x75, 0xdb, 0x4b, 0x60, 0xf9, 0x6b, 0x4c, 0x4b, 0xdf, 0xbe, 0x93, 0xaf, 0x32, 0x4c, 0xd1, 0xbf, 0x93, 0xa6, 0xb4, 0x4d, 0x59, 0xc0, 0xcb, 0x9e, 0x6c, 0x4d, 0x1d, 0xc2, 0x7a, 0x95, 0xd1, 0x4c, 0xc1, 0xc4, 0x1a, 0x8d, 0x3f, 0x4c, 0xca, 0xc5, 0x52, 0x84, 0xcc, 0x4c, 0x9e, 0xc6, 0xa8, 0x7c, 0xa5, 0x4c, 0x67, 0xc7, 0xec, 0x74, 0xef, 0x4c, 0x3b, 0xc9, 0x26, 0x6d, 0x4c, 0x4c, 0x2a, 0xca, 0x71, 0x65, 0xc7, 0x4b, 0xfa, 0xcb, 0x80, 0x5e, 0x46, 0x4b, 0x9a, 0xcc, 0x09, 0x56, 0x6c, 0x4a, 0x65, 0xcc, 0xfb, 0x4e, 0x91, 0x4a, 0xfb, 0xcd, 0x2c, 0x48, 0x59, 0x4d, 0x79, 0xcc, 0x3d, 0x42, 0x4e, 0x51, 0x14, 0xcb, 0x3e, 0x3d, 0xac, 0x53, 0x64, 0xcb, 0x4f, 0x39, 0x81, 0x50, 0xdc, 0xb8, 0xf7, 0xb1, 0xb7, 0x52, 0x92, 0xb9, 0x7b, 0xa8, 0xee, 0x53, 0xf6, 0xba, 0x31, 0xa0, 0x6f, 0x54, 0x8c, 0xbb, 0x94, 0x97, 0xd0, 0x54, 0xe3, 0xbd, 0x18, 0x8f, 0x2c, 0x55, 0x2d, 0xbe, 0x98, 0x86, 0x92, 0x55, 0x51, 0xc0, 0x30, 0x7e, 0x1d, 0x55, 0x35, 0xc1, 0xec, 0x76, 0x4d, 0x55, 0x2e, 0xc3, 0x7a, 0x6e, 0x96, 0x55, 0x6b, 0xc4, 0xee, 0x67, 0x2d, 0x55, 0x90, 0xc6, 0x48, 0x5f, 0xf0, 0x55, 0xed, 0xc6, 0x9e, 0x58, 0x44, 0x56, 0x44, 0xc6, 0xfc, 0x50, 0xa6, 0x57, 0x6f, 0xc6, 0xcc, 0x4a, 0x7c, 0x58, 0x9f, 0xc6, 0x89, 0x44, 0x58, 0x5a, 0x21, 0xc6, 0x72, 0x3e, 0xf8, 0x5c, 0x3c, 0xc6, 0x9d, 0x3a, 0xc0, 0x55, 0x91, 0xb3, 0xa3, 0xb4, 0x4e, 0x57, 0xe3, 0xb3, 0xba, 0xab, 0x1e, 0x59, 0xe4, 0xb4, 0x10, 0xa2, 0x78, 0x5b, 0x50, 0xb4, 0xf3, 0x99, 0xde, 0x5c, 0x70, 0xb6, 0x10, 0x91, 0x3a, 0x5d, 
0x10, 0xb7, 0x94, 0x88, 0xaa, 0x5d, 0x8f, 0xb9, 0x24, 0x80, 0x0e, 0x5d, 0x9a, 0xbb, 0x6a, 0x78, 0x32, 0x5d, 0x73, 0xbd, 0xad, 0x70, 0x43, 0x5d, 0xe1, 0xbf, 0xa0, 0x68, 0xaa, 0x5e, 0x4d, 0xc1, 0x34, 0x61, 0x7b, 0x5e, 0xd5, 0xc1, 0xc4, 0x59, 0xb8, 0x5f, 0x61, 0xc2, 0x1b, 0x51, 0xc1, 0x60, 0x70, 0xc2, 0x1b, 0x4b, 0xa4, 0x61, 0x89, 0xc1, 0xfb, 0x45, 0xd1, 0x62, 0xa0, 0xc1, 0xd5, 0x40, 0x0c, 0x64, 0xb4, 0xc1, 0xfc, 0x3b, 0x81, 0x5a, 0x82, 0xae, 0x4b, 0xb7, 0x64, 0x5d, 0x52, 0xad, 0xf7, 0xad, 0xa2, 0x5f, 0xb9, 0xae, 0x07, 0xa4, 0xee, 0x61, 0xe8, 0xae, 0x5b, 0x9c, 0x4d, 0x63, 0xb1, 0xaf, 0x32, 0x93, 0xa4, 0x64, 0xd6, 0xb0, 0x82, 0x8b, 0x15, 0x65, 0x8f, 0xb2, 0x0b, 0x82, 0xa2, 0x65, 0xf8, 0xb4, 0x17, 0x7a, 0xb0, 0x66, 0x22, 0xb6, 0x5a, 0x72, 0xf3, 0x66, 0x89, 0xb8, 0x60, 0x6b, 0x38, 0x67, 0x06, 0xba, 0x4f, 0x63, 0xba, 0x67, 0x8f, 0xbb, 0xac, 0x5c, 0x1d, 0x68, 0x35, 0xbc, 0x62, 0x54, 0x25, 0x69, 0x14, 0xbc, 0xb6, 0x4d, 0x53, 0x6a, 0x27, 0xbc, 0xac, 0x47, 0x82, 0x6b, 0x23, 0xbc, 0x9c, 0x41, 0x8d, 0x6c, 0xe5, 0xbc, 0xe7, 0x3c, 0xaa, 0x60, 0x70, 0xa8, 0x40, 0xbb, 0x24, 0x63, 0x9a, 0xa7, 0x86, 0xb1, 0x08, 0x66, 0x51, 0xa7, 0x51, 0xa8, 0x2a, 0x68, 0xfa, 0xa7, 0x29, 0x9f, 0x6c, 0x6b, 0x1b, 0xa7, 0xe1, 0x96, 0xe8, 0x6c, 0xe6, 0xa8, 0xd6, 0x8e, 0x65, 0x6d, 0xc1, 0xaa, 0x82, 0x86, 0x00, 0x6e, 0x6d, 0xac, 0x5a, 0x7d, 0xc1, 0x6e, 0xb9, 0xae, 0xb6, 0x75, 0xdf, 0x6f, 0x14, 0xb0, 0xef, 0x6e, 0x02, 0x6f, 0xe0, 0xb2, 0xc1, 0x66, 0x43, 0x70, 0x85, 0xb4, 0x84, 0x5e, 0xc5, 0x71, 0x47, 0xb5, 0xa9, 0x57, 0x1c, 0x72, 0x0a, 0xb6, 0x9f, 0x4f, 0xa2, 0x73, 0x40, 0xb7, 0x13, 0x49, 0xe6, 0x74, 0x73, 0xb7, 0x7e, 0x44, 0x20, 0x76, 0x1d, 0xb8, 0x21, 0x3e, 0x10, 0x66, 0xd7, 0xa1, 0xe9, 0xbe, 0xaf, 0x6a, 0x2e, 0xa1, 0x29, 0xb4, 0xeb, 0x6d, 0x60, 0xa0, 0x95, 0xab, 0xb7, 0x70, 0x4d, 0xa0, 0x37, 0xa2, 0xcb, 0x72, 0xae, 0xa0, 0x8f, 0x9a, 0x45, 0x74, 0xc1, 0xa1, 0x3f, 0x91, 0xeb, 0x76, 0x17, 0xa2, 0xaf, 0x89, 0xa4, 0x77, 0x23, 0xa4, 0x4c, 0x81, 0x61, 0x77, 0xbe, 0xa6, 0x55, 0x79, 0x9b, 0x78, 0x1a, 0xa8, 0x86, 
0x71, 0xeb, 0x78, 0xad, 0xaa, 0x91, 0x69, 0xd8, 0x79, 0x4c, 0xac, 0x8d, 0x61, 0xe1, 0x7a, 0x40, 0xae, 0x56, 0x5a, 0x65, 0x7b, 0x36, 0xaf, 0xd9, 0x52, 0xe5, 0x7c, 0x77, 0xb1, 0x09, 0x4c, 0xb4, 0x7d, 0xda, 0xb2, 0x0a, 0x47, 0x0c, 0x7f, 0x5f, 0xb3, 0x03, 0x40, 0xbb, 0x6d, 0xfa, 0x9b, 0x3e, 0xc2, 0x88, 0x71, 0xfa, 0x9a, 0x33, 0xb9, 0x07, 0x75, 0xc5, 0x99, 0x53, 0xaf, 0xfb, 0x78, 0xc8, 0x98, 0xf7, 0xa7, 0x36, 0x7b, 0x7d, 0x98, 0xd8, 0x9e, 0x81, 0x7d, 0x63, 0x99, 0x9b, 0x96, 0x1b, 0x7e, 0xfe, 0x9a, 0x9f, 0x8d, 0xc0, 0x80, 0x0d, 0x9c, 0x3a, 0x85, 0x76, 0x80, 0xf2, 0x9e, 0x0a, 0x7d, 0x61, 0x81, 0x7e, 0xa0, 0x39, 0x75, 0xb8, 0x81, 0xfb, 0xa2, 0x4c, 0x6d, 0xf6, 0x82, 0x8d, 0xa4, 0x2e, 0x65, 0xac, 0x83, 0x51, 0xa6, 0x31, 0x5d, 0xec, 0x84, 0x8d, 0xa8, 0x3d, 0x56, 0xd1, 0x85, 0xae, 0xa9, 0xfd, 0x4f, 0xc5, 0x87, 0x19, 0xab, 0x9d, 0x49, 0xf7, 0x88, 0x9e, 0xad, 0x30, 0x43, 0xbf, 0x76, 0xbe, 0x93, 0xaa, 0xc6, 0xaf, 0x7a, 0xed, 0x92, 0x91, 0xbd, 0x3b, 0x7e, 0x81, 0x91, 0xf0, 0xb4, 0x77, 0x81, 0xa7, 0x91, 0x84, 0xab, 0xbf, 0x84, 0x71, 0x91, 0x57, 0xa2, 0xff, 0x86, 0x7a, 0x91, 0xeb, 0x9a, 0x88, 0x88, 0x1c, 0x92, 0xcb, 0x92, 0x37, 0x89, 0x29, 0x94, 0x3d, 0x89, 0xec, 0x8a, 0x04, 0x95, 0xe1, 0x81, 0xaa, 0x8a, 0xcb, 0x97, 0xe6, 0x79, 0xc5, 0x8b, 0x63, 0x9a, 0x0f, 0x72, 0x12, 0x8c, 0x24, 0x9c, 0x13, 0x6a, 0x07, 0x8c, 0xe1, 0x9d, 0xd6, 0x61, 0xc2, 0x8e, 0x13, 0xa0, 0x32, 0x5a, 0x6d, 0x8f, 0x54, 0xa2, 0x71, 0x53, 0x95, 0x90, 0x96, 0xa4, 0x85, 0x4d, 0x3f, 0x92, 0x1d, 0xa6, 0xb0, 0x46, 0xaa, 0x80, 0x4d, 0x8b, 0xec, 0xcb, 0x19, 0x84, 0x4e, 0x8b, 0x02, 0xc1, 0xeb, 0x87, 0xba, 0x8a, 0x75, 0xb9, 0x38, 0x8a, 0xfb, 0x89, 0xfc, 0xb0, 0x83, 0x8d, 0xa3, 0x8a, 0x0d, 0xa7, 0xde, 0x90, 0x08, 0x8a, 0x4f, 0x9f, 0x48, 0x91, 0x7f, 0x8b, 0x2b, 0x96, 0xf9, 0x92, 0xc2, 0x8c, 0x25, 0x8e, 0xb1, 0x93, 0x73, 0x8d, 0xbf, 0x86, 0x75, 0x94, 0x30, 0x8f, 0x87, 0x7e, 0x3d, 0x95, 0x0d, 0x91, 0x9b, 0x76, 0x55, 0x95, 0xd7, 0x93, 0xbe, 0x6e, 0x97, 0x96, 0xe5, 0x95, 0xe7, 0x66, 0xbc, 0x97, 0xef, 0x97, 0xfc, 0x5e, 0xee, 0x99, 
0x56, 0x9a, 0x83, 0x57, 0xd1, 0x9a, 0xab, 0x9c, 0xe0, 0x50, 0xae, 0x9c, 0x14, 0x9f, 0x89, 0x49, 0xa8, 0x89, 0xb9, 0x84, 0x52, 0xcf, 0x98, 0x8d, 0x98, 0x83, 0xb8, 0xc6, 0xc5, 0x91, 0x3d, 0x83, 0x2f, 0xbe, 0x39, 0x94, 0x7a, 0x82, 0xd8, 0xb5, 0x67, 0x97, 0x43, 0x82, 0xc1, 0xac, 0xad, 0x99, 0x69, 0x83, 0x1d, 0xa4, 0x2d, 0x9b, 0x11, 0x83, 0xbd, 0x9b, 0xd1, 0x9c, 0x49, 0x84, 0x93, 0x93, 0x8f, 0x9d, 0x29, 0x85, 0xbb, 0x8b, 0x74, 0x9d, 0xcc, 0x87, 0x2a, 0x83, 0x72, 0x9e, 0xc2, 0x89, 0x3b, 0x7b, 0x6d, 0x9f, 0xd6, 0x8b, 0x87, 0x73, 0x77, 0xa0, 0xe8, 0x8d, 0xdb, 0x6b, 0x90, 0xa2, 0x01, 0x90, 0x2a, 0x63, 0x9c, 0xa3, 0x47, 0x92, 0xa9, 0x5c, 0x40, 0xa4, 0xb2, 0x95, 0x4a, 0x55, 0x1e, 0xa6, 0x19, 0x98, 0x14, 0x4d, 0x9f, 0x94, 0x46, 0x7c, 0x4b, 0xd3, 0xe0, 0x98, 0x10, 0x7b, 0xdc, 0xcb, 0x10, 0x9b, 0x76, 0x7b, 0x77, 0xc2, 0xb4, 0x9e, 0x82, 0x7b, 0x30, 0xba, 0x06, 0xa1, 0x46, 0x7b, 0x03, 0xb1, 0x26, 0xa3, 0x1b, 0x7b, 0x8a, 0xa8, 0xe0, 0xa4, 0xbc, 0x7c, 0x1f, 0xa0, 0xa9, 0xa5, 0xc3, 0x7d, 0x26, 0x98, 0x7f, 0xa6, 0xb0, 0x7e, 0x2d, 0x90, 0x53, 0xa7, 0x56, 0x7f, 0x74, 0x88, 0x62, 0xa8, 0x05, 0x80, 0x80, 0x80, 0x80, 0xa9, 0x47, 0x83, 0x66, 0x78, 0x86, 0xaa, 0x65, 0x85, 0xf3, 0x70, 0x9d, 0xab, 0xba, 0x88, 0x78, 0x68, 0xe8, 0xac, 0xfe, 0x8a, 0xc7, 0x61, 0x2b, 0xae, 0x41, 0x8d, 0xb5, 0x59, 0xa9, 0xaf, 0xba, 0x90, 0xd6, 0x51, 0xaa, 0x9f, 0xf5, 0x72, 0xfe, 0xd7, 0xfc, 0xa3, 0x12, 0x73, 0x37, 0xcf, 0x12, 0xa5, 0xe0, 0x73, 0x5a, 0xc6, 0xcc, 0xa8, 0x8d, 0x73, 0x71, 0xbe, 0x7b, 0xaa, 0xd5, 0x73, 0x8f, 0xb5, 0xf0, 0xac, 0xd1, 0x73, 0xd1, 0xad, 0x98, 0xae, 0x43, 0x74, 0x78, 0xa5, 0x97, 0xaf, 0x7a, 0x75, 0x51, 0x9d, 0x8d, 0xb0, 0x45, 0x76, 0xa5, 0x95, 0x74, 0xb1, 0x04, 0x78, 0x0d, 0x8d, 0x6e, 0xb1, 0xc4, 0x79, 0x92, 0x85, 0x90, 0xb2, 0xa5, 0x7b, 0x59, 0x7d, 0xa7, 0xb3, 0xd7, 0x7d, 0xf8, 0x75, 0xa2, 0xb5, 0x2f, 0x80, 0x90, 0x6d, 0xc3, 0xb6, 0xc3, 0x83, 0x3c, 0x66, 0x19, 0xb8, 0x3c, 0x85, 0xe6, 0x5e, 0x5d, 0xb9, 0x9d, 0x89, 0x2b, 0x56, 0x38, 0xa9, 0xe1, 0x6a, 0xa5, 0xdb, 0xd4, 0xad, 0x30, 0x6b, 0x67, 
0xd2, 0x54, 0xaf, 0xdd, 0x6b, 0xbf, 0xca, 0x2b, 0xb2, 0x48, 0x6b, 0xdb, 0xc2, 0x2f, 0xb4, 0x3b, 0x6c, 0x26, 0xba, 0x22, 0xb5, 0xf6, 0x6c, 0x7e, 0xb2, 0x16, 0xb7, 0x51, 0x6d, 0x2c, 0xaa, 0x3d, 0xb8, 0x83, 0x6d, 0xf0, 0xa2, 0x72, 0xb9, 0x6b, 0x6f, 0x17, 0x9a, 0x89, 0xba, 0x36, 0x70, 0x65, 0x92, 0x8f, 0xba, 0xf5, 0x72, 0x30, 0x8a, 0xac, 0xbb, 0xb4, 0x73, 0xfb, 0x82, 0xd7, 0xbc, 0xc4, 0x76, 0x50, 0x7a, 0xe1, 0xbd, 0xfb, 0x78, 0xe8, 0x72, 0xe3, 0xbf, 0x86, 0x7b, 0x9f, 0x6b, 0x03, 0xc1, 0x55, 0x7e, 0x6c, 0x63, 0x21, 0xc3, 0x1a, 0x81, 0xb4, 0x5a, 0xd1, 0xb3, 0xa1, 0x63, 0x5a, 0xdd, 0x0b, 0xb6, 0x8e, 0x64, 0x1e, 0xd4, 0x6c, 0xb9, 0x01, 0x64, 0x72, 0xcc, 0x99, 0xbb, 0x30, 0x64, 0xa0, 0xc5, 0x16, 0xbd, 0x1b, 0x64, 0xef, 0xbd, 0x9e, 0xbe, 0x91, 0x65, 0x88, 0xb6, 0x27, 0xbf, 0xf0, 0x66, 0x27, 0xae, 0xb4, 0xc1, 0x06, 0x67, 0x26, 0xa7, 0x46, 0xc2, 0x0b, 0x68, 0x1e, 0x9f, 0xc9, 0xc2, 0xac, 0x69, 0x8d, 0x98, 0x20, 0xc3, 0x56, 0x6a, 0xea, 0x90, 0x67, 0xc4, 0x23, 0x6c, 0xe2, 0x88, 0x3d, 0xc5, 0x04, 0x6e, 0xc1, 0x80, 0x35, 0xc6, 0x7e, 0x71, 0x8e, 0x78, 0x05, 0xc7, 0xc1, 0x74, 0x3f, 0x6f, 0xf6, 0xc9, 0x1c, 0x77, 0x71, 0x67, 0xc9, 0xca, 0xdc, 0x7a, 0xf5, 0x5e, 0xb9, 0xbc, 0xc4, 0x5d, 0x86, 0xdd, 0xf0, 0xbf, 0x52, 0x5d, 0xec, 0xd5, 0xe0, 0xc1, 0x95, 0x5e, 0x16, 0xce, 0x93, 0xc3, 0x93, 0x5e, 0x4f, 0xc7, 0xb3, 0xc5, 0x6f, 0x5e, 0x79, 0xc0, 0xd9, 0xc6, 0xc0, 0x5e, 0xf8, 0xb9, 0xf0, 0xc7, 0xf3, 0x5f, 0x80, 0xb2, 0xf4, 0xc9, 0x07, 0x60, 0x59, 0xab, 0xdd, 0xc9, 0xfb, 0x61, 0x8a, 0xa4, 0xb2, 0xca, 0xc5, 0x62, 0xd9, 0x9d, 0x5f, 0xcb, 0x5a, 0x64, 0x65, 0x95, 0xce, 0xcb, 0xf8, 0x66, 0x07, 0x8e, 0x0c, 0xcc, 0xb1, 0x68, 0x0f, 0x85, 0xdc, 0xcd, 0xb4, 0x6a, 0x4a, 0x7d, 0x9d, 0xcf, 0x6a, 0x6d, 0x41, 0x75, 0x11, 0xd1, 0x2f, 0x70, 0x60, 0x6c, 0x7c, 0xd2, 0xa9, 0x74, 0x82, 0x62, 0x0c, 0xc5, 0xbc, 0x57, 0xbc, 0xde, 0x0c, 0xc8, 0x1e, 0x58, 0x4f, 0xd7, 0x30, 0xca, 0x48, 0x58, 0xe7, 0xd0, 0x9a, 0xcb, 0xfb, 0x59, 0x31, 0xca, 0x32, 0xcd, 0x8b, 0x59, 0x5f, 0xc3, 0xd1, 0xce, 0xd7, 0x59, 0xaf, 0xbd, 0x68, 0xcf, 
0xd4, 0x5a, 0x3d, 0xb6, 0xe0, 0xd0, 0xd0, 0x5a, 0xcc, 0xb0, 0x4a, 0xd1, 0xac, 0x5b, 0xcb, 0xa9, 0x69, 0xd2, 0x81, 0x5c, 0xd0, 0xa2, 0x71, 0xd3, 0x37, 0x5e, 0x10, 0x9b, 0x17, 0xd3, 0xe7, 0x5f, 0x71, 0x93, 0x91, 0xd4, 0x8f, 0x61, 0x4e, 0x8b, 0xa4, 0xd5, 0x3b, 0x63, 0x7d, 0x83, 0x6d, 0xd6, 0x57, 0x66, 0x2b, 0x7a, 0xc3, 0xd7, 0xf1, 0x69, 0x3f, 0x71, 0xdc, 0xd9, 0xec, 0x6c, 0xe4, 0x68, 0x4f, 0xcf, 0x76, 0x51, 0xcf, 0xde, 0x40, 0xd1, 0x92, 0x52, 0x95, 0xd8, 0x6a, 0xd3, 0xae, 0x53, 0x06, 0xd2, 0xbe, 0xd5, 0x44, 0x53, 0x64, 0xcc, 0xe8, 0xd6, 0xa2, 0x53, 0xaf, 0xc7, 0x02, 0xd7, 0xae, 0x54, 0x40, 0xc0, 0xf8, 0xd8, 0xae, 0x54, 0xd7, 0xba, 0xd1, 0xd9, 0x6a, 0x55, 0x91, 0xb4, 0x7b, 0xda, 0x19, 0x56, 0x56, 0xad, 0xfd, 0xda, 0xd5, 0x57, 0x2b, 0xa7, 0x55, 0xdb, 0x86, 0x58, 0x06, 0xa0, 0x98, 0xdc, 0x2f, 0x59, 0x41, 0x99, 0x4d, 0xdc, 0xe6, 0x5a, 0x82, 0x91, 0xf7, 0xdd, 0x71, 0x5c, 0xa1, 0x89, 0xb6, 0xde, 0x54, 0x5e, 0xfe, 0x81, 0x3d, 0xdf, 0x47, 0x62, 0x12, 0x77, 0xdb, 0xe0, 0xd7, 0x65, 0x62, 0x6e, 0x2b, 0x53, 0x47, 0xc3, 0xc7, 0xb3, 0xac, 0x54, 0x42, 0xc4, 0xcc, 0xab, 0x4e, 0x54, 0x8a, 0xc6, 0x40, 0xa3, 0x25, 0x54, 0x73, 0xc7, 0xc3, 0x9a, 0xb0, 0x54, 0x14, 0xc9, 0x3b, 0x92, 0x09, 0x53, 0xe4, 0xca, 0x81, 0x89, 0x8e, 0x53, 0xae, 0xcb, 0xb9, 0x81, 0x19, 0x53, 0x8a, 0xcc, 0xd4, 0x79, 0x6b, 0x53, 0x61, 0xcd, 0xcd, 0x71, 0xd7, 0x53, 0x21, 0xce, 0xed, 0x6a, 0xa8, 0x52, 0xfb, 0xcf, 0xe4, 0x63, 0xa7, 0x52, 0xcc, 0xd0, 0x6b, 0x5c, 0xae, 0x52, 0x1a, 0xd0, 0xdd, 0x55, 0x92, 0x52, 0xab, 0xd0, 0xbb, 0x4e, 0xb7, 0x54, 0x3e, 0xcf, 0xd4, 0x48, 0x55, 0x54, 0xb8, 0xcf, 0x6c, 0x41, 0xcb, 0x56, 0xf8, 0xce, 0xfe, 0x3c, 0xc2, 0x58, 0x9d, 0xbe, 0x5e, 0xb6, 0x1c, 0x5a, 0x7e, 0xbe, 0xb3, 0xad, 0x54, 0x5b, 0x63, 0xbf, 0xc4, 0xa5, 0x05, 0x5b, 0xde, 0xc1, 0x07, 0x9c, 0x94, 0x5c, 0x00, 0xc2, 0x6b, 0x93, 0xd5, 0x5c, 0x1d, 0xc3, 0xc8, 0x8b, 0x37, 0x5c, 0x36, 0xc5, 0x1a, 0x82, 0xac, 0x5c, 0x1e, 0xc6, 0xa6, 0x7a, 0xc3, 0x5b, 0xf8, 0xc8, 0x25, 0x73, 0x1e, 0x5c, 0x05, 0xc9, 0x7f, 0x6b, 0xe8, 0x5c, 0x1d, 0xca, 0xc7, 
0x65, 0x0d, 0x5c, 0x2e, 0xcb, 0xc5, 0x5e, 0x2f, 0x5c, 0x64, 0xcb, 0xc2, 0x56, 0xb4, 0x5c, 0xa5, 0xcb, 0xb5, 0x4f, 0x77, 0x5d, 0xa3, 0xcb, 0x1c, 0x49, 0x89, 0x5e, 0xa5, 0xca, 0x8b, 0x43, 0x92, 0x60, 0x13, 0xca, 0x40, 0x3e, 0x2a, 0x5d, 0x53, 0xb9, 0x16, 0xb8, 0xd8, 0x5f, 0xb4, 0xb9, 0x0e, 0xaf, 0x86, 0x61, 0x24, 0xb9, 0xbe, 0xa7, 0x18, 0x62, 0x72, 0xba, 0x85, 0x9e, 0xa2, 0x63, 0x52, 0xbb, 0xa8, 0x95, 0xdd, 0x63, 0xf3, 0xbc, 0xf5, 0x8d, 0x28, 0x64, 0x4c, 0xbe, 0x7c, 0x84, 0x7a, 0x64, 0x60, 0xc0, 0x57, 0x7c, 0x4c, 0x64, 0x40, 0xc2, 0x3a, 0x74, 0xbe, 0x64, 0x4d, 0xc3, 0xe2, 0x6d, 0x6b, 0x64, 0x94, 0xc5, 0x5a, 0x66, 0x8f, 0x64, 0xbb, 0xc6, 0xcb, 0x5f, 0xe4, 0x65, 0x0e, 0xc6, 0xd1, 0x58, 0x32, 0x65, 0x5b, 0xc6, 0xf3, 0x50, 0x81, 0x66, 0x3f, 0xc6, 0x72, 0x4a, 0x9f, 0x67, 0x27, 0xc5, 0xf6, 0x44, 0xad, 0x68, 0x51, 0xc5, 0x95, 0x3e, 0xe1, 0x62, 0x1b, 0xb4, 0x0b, 0xbc, 0x2b, 0x64, 0xc3, 0xb3, 0xa5, 0xb2, 0x22, 0x66, 0xde, 0xb3, 0xc3, 0xa9, 0x5e, 0x68, 0xd9, 0xb3, 0xff, 0xa0, 0xe2, 0x6a, 0x5c, 0xb4, 0xd9, 0x98, 0x38, 0x6b, 0xac, 0xb5, 0xd6, 0x8f, 0x8e, 0x6c, 0x36, 0xb7, 0x5c, 0x87, 0x15, 0x6c, 0x9a, 0xb9, 0x03, 0x7e, 0xb3, 0x6c, 0xa3, 0xbb, 0x43, 0x77, 0x11, 0x6c, 0x93, 0xbd, 0x80, 0x6f, 0x7d, 0x6c, 0xe7, 0xbf, 0x77, 0x68, 0x5d, 0x6d, 0x32, 0xc1, 0x19, 0x61, 0x98, 0x6d, 0xa1, 0xc1, 0xa9, 0x5a, 0x13, 0x6e, 0x17, 0xc1, 0xfb, 0x52, 0x32, 0x6e, 0xcb, 0xc1, 0xa1, 0x4b, 0xde, 0x6f, 0x8c, 0xc1, 0x1c, 0x45, 0xe0, 0x70, 0x55, 0xc0, 0xa4, 0x3f, 0xd9, 0x66, 0xe1, 0xaf, 0x17, 0xc0, 0xc3, 0x6a, 0x41, 0xae, 0x17, 0xb5, 0x7d, 0x6c, 0xf9, 0xad, 0x93, 0xac, 0x2b, 0x6f, 0x67, 0xad, 0x7a, 0xa3, 0xa2, 0x71, 0x7c, 0xad, 0xea, 0x9b, 0x09, 0x73, 0x41, 0xae, 0xc7, 0x92, 0x56, 0x74, 0x35, 0xb0, 0x2b, 0x89, 0xf1, 0x74, 0xdc, 0xb1, 0xa4, 0x81, 0xb5, 0x75, 0x20, 0xb3, 0xab, 0x79, 0xf8, 0x75, 0x36, 0xb5, 0xd4, 0x72, 0x6d, 0x75, 0x8e, 0xb7, 0xca, 0x6b, 0x07, 0x75, 0xe6, 0xb9, 0xb7, 0x63, 0xe3, 0x76, 0x4c, 0xbb, 0x1f, 0x5c, 0xa2, 0x76, 0xe0, 0xbb, 0xcd, 0x54, 0xf8, 0x77, 0x9f, 0xbc, 0x29, 0x4e, 0x12, 0x78, 
0xbd, 0xbc, 0x3b, 0x48, 0x44, 0x7a, 0x14, 0xbc, 0x5a, 0x41, 0x9e, 0x6d, 0x76, 0xa8, 0xd5, 0xc3, 0x8c, 0x70, 0xec, 0xa7, 0xb7, 0xb9, 0x44, 0x74, 0x42, 0xa6, 0xc6, 0xaf, 0xba, 0x76, 0xd9, 0xa6, 0x8e, 0xa7, 0x02, 0x79, 0x31, 0xa6, 0x89, 0x9e, 0x5a, 0x7b, 0x03, 0xa7, 0x4f, 0x95, 0xdf, 0x7c, 0x80, 0xa8, 0x58, 0x8d, 0x78, 0x7d, 0x59, 0xa9, 0xe8, 0x85, 0x4a, 0x7d, 0xea, 0xab, 0xb0, 0x7d, 0x53, 0x7e, 0x04, 0xad, 0xee, 0x75, 0xb9, 0x7e, 0x1a, 0xb0, 0x22, 0x6e, 0x18, 0x7e, 0xa5, 0xb1, 0xf3, 0x66, 0xa3, 0x7f, 0x0e, 0xb3, 0xc6, 0x5f, 0x68, 0x7f, 0xec, 0xb4, 0xda, 0x58, 0x2f, 0x80, 0xae, 0xb5, 0xce, 0x50, 0xde, 0x81, 0xef, 0xb6, 0x92, 0x4a, 0xf3, 0x83, 0x61, 0xb7, 0x62, 0x44, 0x73, 0x74, 0xb0, 0xa2, 0x23, 0xc6, 0xe8, 0x78, 0xbd, 0xa0, 0xca, 0xbc, 0xa9, 0x7b, 0xfc, 0xa0, 0x02, 0xb3, 0x91, 0x7e, 0xc3, 0x9f, 0x8f, 0xaa, 0xaa, 0x81, 0x38, 0x9f, 0x50, 0xa1, 0xd7, 0x83, 0x0d, 0x9f, 0xe9, 0x99, 0x71, 0x84, 0xb4, 0xa0, 0xb2, 0x91, 0x31, 0x85, 0xd1, 0xa2, 0x30, 0x89, 0x13, 0x86, 0xb5, 0xa3, 0xc5, 0x80, 0xf4, 0x87, 0x1b, 0xa5, 0xda, 0x79, 0x64, 0x87, 0x4e, 0xa8, 0x05, 0x71, 0xea, 0x87, 0xb1, 0xa9, 0xf8, 0x6a, 0x26, 0x88, 0x16, 0xab, 0xda, 0x62, 0x74, 0x88, 0xe9, 0xad, 0x90, 0x5b, 0x37, 0x89, 0xdc, 0xae, 0xf9, 0x54, 0x08, 0x8b, 0x03, 0xb0, 0x48, 0x4d, 0x71, 0x8c, 0x89, 0xb1, 0x9e, 0x47, 0x36, 0x7d, 0x4d, 0x9a, 0xb7, 0xca, 0xcc, 0x81, 0xba, 0x99, 0x36, 0xc0, 0xd0, 0x84, 0xca, 0x98, 0x97, 0xb7, 0xff, 0x87, 0xb4, 0x98, 0x11, 0xaf, 0x33, 0x8a, 0x2c, 0x97, 0xfe, 0xa6, 0x7f, 0x8c, 0x51, 0x98, 0x38, 0x9d, 0xeb, 0x8d, 0xba, 0x99, 0x1b, 0x95, 0x9c, 0x8e, 0xdb, 0x9a, 0x46, 0x8d, 0x55, 0x8f, 0x94, 0x9b, 0xf9, 0x85, 0x25, 0x90, 0x28, 0x9d, 0xdb, 0x7d, 0x2d, 0x90, 0x79, 0xa0, 0x0d, 0x75, 0xa5, 0x90, 0xe3, 0xa2, 0x0f, 0x6e, 0x20, 0x91, 0x98, 0xa3, 0xad, 0x66, 0x27, 0x92, 0x54, 0xa5, 0x73, 0x5e, 0x96, 0x93, 0x82, 0xa7, 0x68, 0x57, 0xbc, 0x94, 0xa4, 0xa9, 0x1f, 0x50, 0xcd, 0x96, 0x1b, 0xaa, 0xf8, 0x4a, 0x2d, 0x85, 0xd4, 0x93, 0x5f, 0xcf, 0x44, 0x89, 0xe5, 0x92, 0x1c, 0xc5, 0x81, 0x8d, 0x5d, 0x91, 0x28, 
0xbc, 0x82, 0x90, 0x5f, 0x90, 0x90, 0xb3, 0x98, 0x93, 0x04, 0x90, 0x8b, 0xaa, 0xee, 0x95, 0x4c, 0x90, 0xd1, 0xa2, 0x66, 0x96, 0xd5, 0x91, 0x89, 0x9a, 0x16, 0x98, 0x07, 0x92, 0x66, 0x91, 0xd2, 0x98, 0xd7, 0x93, 0xe2, 0x89, 0xb4, 0x99, 0x83, 0x95, 0x85, 0x81, 0xaa, 0x9a, 0x1e, 0x97, 0x8c, 0x79, 0xf5, 0x9a, 0x80, 0x99, 0xb9, 0x72, 0x79, 0x9b, 0x30, 0x9b, 0xb0, 0x6a, 0xb2, 0x9c, 0x0e, 0x9d, 0x57, 0x62, 0xa3, 0x9d, 0x3d, 0x9f, 0x70, 0x5b, 0x42, 0x9e, 0x99, 0xa1, 0xa2, 0x54, 0x5d, 0xa0, 0x19, 0xa3, 0xda, 0x4d, 0x2d, 0x8f, 0x02, 0x8b, 0xe9, 0xd3, 0x3f, 0x93, 0x25, 0x8a, 0xea, 0xca, 0x0e, 0x96, 0xd2, 0x8a, 0x17, 0xc1, 0x4c, 0x99, 0xed, 0x89, 0xa6, 0xb8, 0x82, 0x9c, 0xd4, 0x89, 0x49, 0xaf, 0xbb, 0x9e, 0xce, 0x89, 0xa4, 0xa7, 0x48, 0xa0, 0x87, 0x8a, 0x15, 0x9e, 0xe0, 0xa1, 0x7e, 0x8a, 0xf3, 0x96, 0x9f, 0xa2, 0x53, 0x8b, 0xf5, 0x8e, 0x6b, 0xa2, 0xe2, 0x8d, 0x81, 0x86, 0x6f, 0xa3, 0x7e, 0x8f, 0x39, 0x7e, 0x73, 0xa4, 0x59, 0x91, 0x53, 0x76, 0xad, 0xa5, 0x08, 0x93, 0x70, 0x6f, 0x1a, 0xa6, 0x08, 0x95, 0x97, 0x67, 0x64, 0xa7, 0x01, 0x97, 0x92, 0x5f, 0xa9, 0xa8, 0x60, 0x9a, 0x17, 0x58, 0x75, 0xa9, 0xca, 0x9c, 0x8c, 0x50, 0xbd, 0x99, 0xa7, 0x84, 0x3d, 0xd7, 0x5a, 0x9d, 0x5e, 0x83, 0xb9, 0xce, 0x20, 0xa0, 0x9a, 0x83, 0x20, 0xc5, 0xbe, 0xa3, 0x94, 0x82, 0xa8, 0xbd, 0x3f, 0xa6, 0x43, 0x82, 0x57, 0xb4, 0x6e, 0xa8, 0x50, 0x82, 0x69, 0xab, 0xed, 0xa9, 0xc2, 0x82, 0xdd, 0xa3, 0xa1, 0xaa, 0xd3, 0x83, 0x9e, 0x9b, 0x6a, 0xab, 0x9e, 0x84, 0x9e, 0x93, 0x42, 0xac, 0x50, 0x85, 0xcf, 0x8b, 0x4a, 0xac, 0xed, 0x87, 0x2a, 0x83, 0x71, 0xad, 0xc5, 0x89, 0x32, 0x7b, 0x93, 0xae, 0xb8, 0x8b, 0x82, 0x73, 0xbc, 0xaf, 0xc9, 0x8d, 0xb8, 0x6b, 0xfc, 0xb1, 0x04, 0x8f, 0xce, 0x64, 0x3c, 0xb2, 0x3f, 0x92, 0x47, 0x5c, 0xc0, 0xb3, 0xaf, 0x95, 0x45, 0x54, 0xae, 0xa4, 0x8c, 0x7c, 0x2b, 0xdb, 0x77, 0xa7, 0xa3, 0x7c, 0x1b, 0xd2, 0x75, 0xaa, 0x9e, 0x7b, 0xc9, 0xca, 0x00, 0xad, 0x79, 0x7b, 0x6f, 0xc1, 0xa5, 0xaf, 0xe9, 0x7b, 0x31, 0xb9, 0x1d, 0xb2, 0x0e, 0x7a, 0xfb, 0xb0, 0x96, 0xb3, 0x43, 0x7b, 0x7d, 0xa8, 0x72, 0xb4, 
0x59, 0x7c, 0x06, 0xa0, 0x56, 0xb4, 0xfd, 0x7d, 0x35, 0x98, 0x38, 0xb5, 0x8b, 0x7e, 0x5f, 0x90, 0x15, 0xb6, 0x52, 0x7f, 0x8d, 0x88, 0x44, 0xb7, 0x16, 0x80, 0x80, 0x80, 0x80, 0xb8, 0x39, 0x83, 0x4d, 0x78, 0xa3, 0xb9, 0x45, 0x85, 0xc0, 0x70, 0xd1, 0xba, 0xd0, 0x88, 0x3c, 0x69, 0x1c, 0xbc, 0x65, 0x8a, 0x8f, 0x61, 0x59, 0xbd, 0xf0, 0x8d, 0x76, 0x59, 0x37, 0xae, 0xd4, 0x73, 0xe1, 0xdf, 0x32, 0xb1, 0xe2, 0x73, 0xf4, 0xd6, 0x29, 0xb4, 0x99, 0x73, 0xe9, 0xcd, 0xb6, 0xb7, 0x0f, 0x73, 0xbe, 0xc5, 0x69, 0xb9, 0x40, 0x73, 0x9f, 0xbd, 0x26, 0xba, 0xfd, 0x73, 0xb3, 0xb4, 0xf0, 0xbc, 0x6d, 0x74, 0x07, 0xac, 0xe8, 0xbd, 0x79, 0x74, 0xb1, 0xa5, 0x0e, 0xbe, 0x59, 0x75, 0x93, 0x9d, 0x2e, 0xbe, 0xf4, 0x76, 0xdb, 0x95, 0x3e, 0xbf, 0x91, 0x78, 0x3b, 0x8d, 0x53, 0xc0, 0x58, 0x79, 0xb5, 0x85, 0x8b, 0xc1, 0x3e, 0x7b, 0x65, 0x7d, 0xb7, 0xc2, 0x6e, 0x7d, 0xdf, 0x75, 0xc3, 0xc3, 0xc1, 0x80, 0x60, 0x6d, 0xde, 0xc5, 0x5a, 0x83, 0x4d, 0x66, 0x0f, 0xc7, 0x01, 0x86, 0x5d, 0x5d, 0xa0, 0xb8, 0x7b, 0x6c, 0x2e, 0xe1, 0x80, 0xbb, 0x5d, 0x6c, 0x5d, 0xd8, 0xa5, 0xbd, 0xe7, 0x6c, 0x70, 0xd0, 0x79, 0xc0, 0x24, 0x6c, 0x61, 0xc8, 0x80, 0xc2, 0x3b, 0x6c, 0x4c, 0xc0, 0xae, 0xc3, 0xaa, 0x6c, 0x94, 0xb8, 0xfb, 0xc4, 0xf4, 0x6c, 0xde, 0xb1, 0x49, 0xc5, 0xf8, 0x6d, 0x95, 0xa9, 0xa7, 0xc6, 0xe7, 0x6e, 0x56, 0xa2, 0x02, 0xc7, 0xa7, 0x6f, 0x7d, 0x9a, 0x3d, 0xc8, 0x52, 0x70, 0xcc, 0x92, 0x74, 0xc9, 0x08, 0x72, 0x86, 0x8a, 0xa6, 0xc9, 0xc5, 0x74, 0x3a, 0x82, 0xd9, 0xca, 0xe1, 0x76, 0x85, 0x7a, 0xe2, 0xcc, 0x2c, 0x79, 0x0c, 0x72, 0xe8, 0xcd, 0x90, 0x7b, 0xf4, 0x6a, 0xcd, 0xcf, 0x50, 0x7f, 0x62, 0x61, 0xfb, 0xc2, 0x1c, 0x65, 0x4d, 0xe2, 0x81, 0xc4, 0xa3, 0x65, 0x5d, 0xda, 0x34, 0xc6, 0xde, 0x65, 0x5b, 0xd2, 0xb0, 0xc8, 0xda, 0x65, 0x66, 0xcb, 0x56, 0xca, 0xa3, 0x65, 0x6e, 0xc4, 0x0f, 0xcc, 0x15, 0x65, 0xad, 0xbc, 0xd0, 0xcd, 0x27, 0x66, 0x2f, 0xb5, 0x8a, 0xce, 0x23, 0x66, 0xce, 0xae, 0x3f, 0xcf, 0x00, 0x67, 0xd0, 0xa6, 0xee, 0xcf, 0xd4, 0x68, 0xc9, 0x9f, 0x8d, 0xd0, 0x73, 0x6a, 0x34, 0x97, 0xe4, 0xd1, 0x14, 0x6b, 0x8d, 
0x90, 0x34, 0xd1, 0xe9, 0x6d, 0x5c, 0x88, 0x2c, 0xd2, 0xc8, 0x6f, 0x0b, 0x80, 0x30, 0xd4, 0x5c, 0x71, 0xf6, 0x77, 0xcc, 0xd5, 0xcb, 0x74, 0xc9, 0x6f, 0x93, 0xd7, 0x64, 0x78, 0x69, 0x66, 0x03, 0xcb, 0xbc, 0x5e, 0x32, 0xe2, 0x77, 0xcd, 0xcb, 0x5e, 0x7a, 0xdb, 0x5e, 0xcf, 0xc5, 0x5e, 0xc1, 0xd4, 0x9b, 0xd1, 0xb8, 0x5e, 0xd5, 0xcd, 0xfc, 0xd3, 0x35, 0x5e, 0xde, 0xc7, 0x3c, 0xd4, 0x87, 0x5e, 0xf9, 0xc0, 0x73, 0xd5, 0x7d, 0x5f, 0x79, 0xb9, 0x97, 0xd6, 0x63, 0x5f, 0xff, 0xb2, 0xa8, 0xd7, 0x2b, 0x61, 0x06, 0xab, 0x9e, 0xd7, 0xe3, 0x62, 0x35, 0xa4, 0x7a, 0xd8, 0x8f, 0x63, 0x7b, 0x9d, 0x2e, 0xd9, 0x2f, 0x64, 0xf2, 0x95, 0xae, 0xd9, 0xcf, 0x66, 0x8a, 0x8d, 0xfd, 0xda, 0x80, 0x68, 0x8e, 0x85, 0xd0, 0xdb, 0x76, 0x6a, 0xbc, 0x7d, 0x71, 0xdd, 0x45, 0x6d, 0xda, 0x74, 0x89, 0xdf, 0x3e, 0x71, 0x46, 0x6b, 0x45, 0xd5, 0xe3, 0x57, 0x51, 0xe2, 0xf8, 0xd8, 0x09, 0x57, 0x81, 0xdc, 0xbb, 0xd9, 0xfc, 0x57, 0xd1, 0xd6, 0xc2, 0xdb, 0xcf, 0x58, 0x1f, 0xd0, 0xca, 0xdd, 0x03, 0x58, 0x98, 0xca, 0x90, 0xdd, 0xc9, 0x59, 0x4f, 0xc4, 0x21, 0xde, 0x7a, 0x5a, 0x01, 0xbd, 0x97, 0xdf, 0x3c, 0x5a, 0x9c, 0xb7, 0x04, 0xdf, 0xd4, 0x5b, 0x47, 0xb0, 0x53, 0xe0, 0x66, 0x5c, 0x2b, 0xa9, 0x6f, 0xe0, 0xfa, 0x5d, 0x17, 0xa2, 0x77, 0xe1, 0x9c, 0x5e, 0x49, 0x9b, 0x2d, 0xe2, 0x54, 0x5f, 0x92, 0x93, 0xbe, 0xe2, 0xfc, 0x61, 0x7a, 0x8b, 0xd0, 0xe3, 0xb9, 0x63, 0xa0, 0x83, 0x88, 0xe4, 0xb7, 0x66, 0x5a, 0x7a, 0xab, 0xe6, 0x54, 0x69, 0xb7, 0x71, 0x21, 0x5a, 0x8c, 0xc8, 0x37, 0xb7, 0x9b, 0x5b, 0xc2, 0xc9, 0x3e, 0xaf, 0x71, 0x5b, 0x8d, 0xcb, 0x52, 0xa7, 0x4b, 0x5b, 0x7c, 0xcd, 0x40, 0x9f, 0x57, 0x5a, 0x9b, 0xcf, 0x13, 0x96, 0x82, 0x5a, 0x83, 0xd0, 0x12, 0x8d, 0xfe, 0x59, 0xdc, 0xd1, 0x45, 0x85, 0x9d, 0x59, 0xd3, 0xd1, 0xf6, 0x7d, 0x9f, 0x59, 0xec, 0xd2, 0x8d, 0x76, 0x3b, 0x59, 0xf0, 0xd3, 0x24, 0x6e, 0xfc, 0x59, 0x9c, 0xd3, 0xf2, 0x68, 0x6a, 0x59, 0x3e, 0xd4, 0xb1, 0x61, 0xdc, 0x58, 0x6b, 0xd5, 0x16, 0x5a, 0xfc, 0x58, 0xe6, 0xd4, 0xbc, 0x54, 0x22, 0x58, 0x41, 0xd4, 0xd0, 0x4d, 0x58, 0x5a, 0xa6, 0xd3, 0x64, 0x47, 0x4b, 0x5b, 
0x13, 0xd2, 0xe1, 0x40, 0xcf, 0x60, 0x0f, 0xc3, 0x00, 0xba, 0x1d, 0x61, 0xd5, 0xc3, 0x80, 0xb1, 0x89, 0x62, 0x8f, 0xc4, 0xae, 0xa9, 0x4b, 0x62, 0xee, 0xc6, 0x2c, 0xa1, 0x29, 0x63, 0x27, 0xc7, 0x6d, 0x98, 0x86, 0x63, 0x4d, 0xc8, 0x9c, 0x8f, 0xcd, 0x63, 0x56, 0xc9, 0xe4, 0x87, 0x5a, 0x63, 0x49, 0xcb, 0x31, 0x7f, 0x0b, 0x63, 0x12, 0xcc, 0xa8, 0x77, 0x8e, 0x62, 0xdd, 0xcd, 0xfc, 0x70, 0x0d, 0x62, 0xd5, 0xcf, 0x4e, 0x69, 0x96, 0x62, 0xb4, 0xd0, 0x5c, 0x63, 0x3c, 0x62, 0x9b, 0xd0, 0xbc, 0x5c, 0x83, 0x62, 0x8f, 0xd0, 0x93, 0x55, 0x33, 0x62, 0xc0, 0xd0, 0x42, 0x4e, 0x50, 0x63, 0x88, 0xcf, 0x58, 0x48, 0x6b, 0x64, 0x5b, 0xce, 0x70, 0x42, 0x50, 0x64, 0xa7, 0xbe, 0x7f, 0xbd, 0xa8, 0x67, 0x12, 0xbe, 0x7b, 0xb3, 0xd6, 0x68, 0x9f, 0xbe, 0xf8, 0xab, 0x47, 0x69, 0xa2, 0xbf, 0xf6, 0xa2, 0xe0, 0x6a, 0x5e, 0xc0, 0xfb, 0x9a, 0x54, 0x6a, 0xf6, 0xc2, 0x08, 0x91, 0xab, 0x6b, 0x46, 0xc3, 0x4c, 0x89, 0x1d, 0x6b, 0x77, 0xc4, 0x9b, 0x80, 0x97, 0x6b, 0x5d, 0xc6, 0x50, 0x79, 0x38, 0x6b, 0x26, 0xc7, 0xec, 0x71, 0xe3, 0x6b, 0x1f, 0xc9, 0x73, 0x6b, 0x28, 0x6b, 0x0b, 0xca, 0xfc, 0x64, 0xb8, 0x6a, 0xe1, 0xcc, 0x3c, 0x5e, 0x2b, 0x6a, 0xf2, 0xcb, 0xf5, 0x56, 0x9b, 0x6b, 0x15, 0xcb, 0xc0, 0x4f, 0x4c, 0x6b, 0xeb, 0xca, 0xce, 0x49, 0x6e, 0x6c, 0xbd, 0xc9, 0xea, 0x43, 0x44, 0x69, 0x63, 0xb9, 0x9f, 0xc1, 0x49, 0x6c, 0x1f, 0xb9, 0x3f, 0xb6, 0x9d, 0x6e, 0x3f, 0xb9, 0x15, 0xad, 0xa2, 0x6f, 0xd1, 0xb9, 0xa2, 0xa5, 0x24, 0x71, 0x3d, 0xba, 0x59, 0x9c, 0x9f, 0x72, 0x50, 0xbb, 0x60, 0x93, 0xf7, 0x72, 0xf0, 0xbc, 0xac, 0x8b, 0x64, 0x73, 0x32, 0xbe, 0x2b, 0x82, 0xbf, 0x73, 0x3a, 0xc0, 0x1f, 0x7a, 0xfd, 0x73, 0x40, 0xc1, 0xdd, 0x73, 0xd6, 0x73, 0x4d, 0xc3, 0x7c, 0x6c, 0xee, 0x73, 0x62, 0xc5, 0x0c, 0x66, 0x62, 0x73, 0x50, 0xc6, 0xa9, 0x60, 0x01, 0x73, 0x83, 0xc6, 0xa1, 0x58, 0x6b, 0x73, 0xad, 0xc6, 0xaf, 0x50, 0xc6, 0x74, 0x72, 0xc6, 0x04, 0x4a, 0xc6, 0x75, 0x55, 0xc5, 0x63, 0x44, 0x63, 0x6e, 0x49, 0xb4, 0xd5, 0xc5, 0x7a, 0x71, 0x95, 0xb3, 0xec, 0xb9, 0xd7, 0x74, 0x3c, 0xb3, 0x36, 0xb0, 0x4b, 0x76, 0x55, 0xb3, 0x56, 
0xa7, 0xd3, 0x78, 0x48, 0xb3, 0x87, 0x9f, 0x64, 0x79, 0xc0, 0xb4, 0x68, 0x96, 0xb8, 0x7a, 0xeb, 0xb5, 0x6e, 0x8e, 0x2c, 0x7b, 0x68, 0xb6, 0xe1, 0x85, 0xed, 0x7b, 0xb4, 0xb8, 0x87, 0x7d, 0xe3, 0x7b, 0xa9, 0xba, 0x9e, 0x76, 0x8e, 0x7b, 0x90, 0xbc, 0xb0, 0x6f, 0x4e, 0x7b, 0xa2, 0xbe, 0xb4, 0x68, 0x63, 0x7b, 0xa2, 0xc0, 0x88, 0x61, 0xbd, 0x7b, 0xeb, 0xc1, 0x13, 0x5a, 0x81, 0x7c, 0x43, 0xc1, 0x43, 0x52, 0xf7, 0x7d, 0x10, 0xc1, 0x39, 0x4c, 0x83, 0x7e, 0x68, 0xc1, 0x1b, 0x45, 0x8d, 0x74, 0x68, 0xaf, 0x72, 0xc8, 0xa6, 0x78, 0x54, 0xad, 0xd5, 0xbc, 0xf3, 0x7b, 0x14, 0xad, 0x21, 0xb3, 0xac, 0x7d, 0x70, 0xac, 0xd9, 0xaa, 0xf7, 0x7f, 0x90, 0xac, 0xc0, 0xa2, 0x72, 0x81, 0x52, 0xad, 0x55, 0x99, 0xe0, 0x82, 0xda, 0xae, 0x2e, 0x91, 0x40, 0x83, 0x87, 0xaf, 0xa5, 0x89, 0x11, 0x84, 0x0c, 0xb1, 0x1a, 0x80, 0xfa, 0x84, 0x2d, 0xb3, 0x18, 0x79, 0x94, 0x84, 0x2c, 0xb5, 0x20, 0x72, 0x54, 0x84, 0x52, 0xb7, 0x07, 0x6b, 0x27, 0x84, 0x6c, 0xb8, 0xea, 0x64, 0x26, 0x84, 0xa7, 0xba, 0x5c, 0x5d, 0x1e, 0x85, 0x38, 0xba, 0xf7, 0x55, 0xc8, 0x85, 0xe9, 0xbb, 0x88, 0x4e, 0xc1, 0x87, 0x44, 0xbc, 0x2f, 0x48, 0x25, 0x7c, 0x20, 0xa8, 0x52, 0xcb, 0xf3, 0x80, 0x8b, 0xa6, 0xa7, 0xc0, 0x1b, 0x83, 0x1a, 0xa6, 0x36, 0xb7, 0x68, 0x85, 0x96, 0xa5, 0xd4, 0xae, 0xb8, 0x87, 0xbf, 0xa5, 0xbd, 0xa6, 0x18, 0x89, 0xa5, 0xa5, 0xf4, 0x9d, 0x8e, 0x8b, 0x00, 0xa6, 0xce, 0x95, 0x28, 0x8c, 0x04, 0xa7, 0xf7, 0x8c, 0xdb, 0x8c, 0x95, 0xa9, 0x92, 0x84, 0xbc, 0x8c, 0xe2, 0xab, 0x6b, 0x7c, 0xea, 0x8c, 0xdf, 0xad, 0x9f, 0x75, 0x8b, 0x8c, 0xd5, 0xaf, 0xc5, 0x6e, 0x2a, 0x8d, 0x36, 0xb1, 0x74, 0x66, 0xe5, 0x8d, 0x76, 0xb3, 0x24, 0x5f, 0xce, 0x8e, 0x4b, 0xb4, 0x1c, 0x58, 0xb6, 0x8f, 0x09, 0xb4, 0xfd, 0x51, 0x8f, 0x90, 0x5f, 0xb6, 0x34, 0x4a, 0xe6, 0x83, 0xc7, 0xa1, 0xa3, 0xcf, 0x20, 0x87, 0xea, 0xa0, 0x37, 0xc4, 0xbe, 0x8b, 0x3f, 0x9f, 0x44, 0xbb, 0x9f, 0x8d, 0xef, 0x9e, 0xb8, 0xb2, 0xc4, 0x90, 0x40, 0x9e, 0x8b, 0xaa, 0x0f, 0x92, 0x4b, 0x9e, 0x9d, 0xa1, 0x70, 0x93, 0x81, 0x9f, 0x6b, 0x99, 0x07, 0x94, 0x83, 0xa0, 0x55, 0x90, 0xa1, 0x95, 
0x40, 0xa1, 0xe8, 0x88, 0xa5, 0x95, 0xde, 0xa3, 0x7f, 0x80, 0xa8, 0x96, 0x12, 0xa5, 0x98, 0x79, 0x4d, 0x96, 0x1d, 0xa7, 0xc2, 0x72, 0x04, 0x96, 0x7b, 0xa9, 0x8a, 0x6a, 0x78, 0x96, 0xe5, 0xab, 0x2f, 0x62, 0xf3, 0x97, 0xba, 0xac, 0xcd, 0x5b, 0xe1, 0x98, 0xdf, 0xae, 0x3c, 0x55, 0x00, 0x9a, 0x29, 0xaf, 0xaf, 0x4d, 0xc7, 0x8b, 0xe7, 0x9a, 0xa0, 0xd3, 0x21, 0x90, 0x32, 0x99, 0x1f, 0xc9, 0x33, 0x94, 0x0b, 0x97, 0xfa, 0xc0, 0x1f, 0x96, 0xc1, 0x97, 0x8b, 0xb7, 0x4b, 0x99, 0x40, 0x97, 0x3e, 0xae, 0x9d, 0x9b, 0x33, 0x97, 0x7a, 0xa6, 0x13, 0x9c, 0xc6, 0x97, 0xe8, 0x9d, 0x95, 0x9d, 0xab, 0x98, 0xc3, 0x95, 0x2c, 0x9e, 0x6a, 0x99, 0xec, 0x8c, 0xee, 0x9e, 0xfb, 0x9b, 0x8d, 0x85, 0x06, 0x9f, 0x64, 0x9d, 0x63, 0x7d, 0x55, 0x9f, 0x6b, 0x9f, 0x9f, 0x76, 0x13, 0x9f, 0x91, 0xa1, 0xb6, 0x6e, 0xce, 0xa0, 0x74, 0xa3, 0x25, 0x66, 0xe1, 0xa1, 0x44, 0xa4, 0xb0, 0x5f, 0x40, 0xa2, 0x98, 0xa6, 0xb0, 0x58, 0x6c, 0xa3, 0xf8, 0xa8, 0xa4, 0x50, 0xe7, 0x95, 0x87, 0x93, 0x1a, 0xd6, 0xdc, 0x99, 0x94, 0x91, 0xf3, 0xcd, 0x1b, 0x9c, 0xc7, 0x91, 0x18, 0xc4, 0x58, 0x9f, 0x9e, 0x90, 0x89, 0xbb, 0xb7, 0xa2, 0x5b, 0x90, 0x24, 0xb3, 0x23, 0xa4, 0x60, 0x90, 0x35, 0xaa, 0x9c, 0xa5, 0xf5, 0x90, 0x87, 0xa2, 0x14, 0xa6, 0xe3, 0x91, 0x55, 0x99, 0xc6, 0xa7, 0x9a, 0x92, 0x45, 0x91, 0x85, 0xa8, 0x25, 0x93, 0xbf, 0x89, 0x99, 0xa8, 0x96, 0x95, 0x50, 0x81, 0xbe, 0xa9, 0x19, 0x97, 0x43, 0x7a, 0x26, 0xa9, 0x6e, 0x99, 0x53, 0x72, 0xc6, 0xaa, 0x12, 0x9b, 0x3f, 0x6b, 0x31, 0xaa, 0xf2, 0x9c, 0xf8, 0x63, 0x64, 0xac, 0x15, 0x9f, 0x00, 0x5b, 0xe1, 0xad, 0x8f, 0xa1, 0x52, 0x54, 0x11, 0xa0, 0x40, 0x8b, 0x60, 0xda, 0xd5, 0xa3, 0xc5, 0x8a, 0xbb, 0xd1, 0x3a, 0xa6, 0xbd, 0x8a, 0x1e, 0xc8, 0xc9, 0xa9, 0x9b, 0x89, 0x88, 0xc0, 0x70, 0xac, 0x0b, 0x89, 0x33, 0xb7, 0xd7, 0xae, 0x33, 0x88, 0xf5, 0xaf, 0x4b, 0xaf, 0x61, 0x89, 0x5c, 0xa6, 0xdd, 0xb0, 0x5e, 0x89, 0xe6, 0x9e, 0x78, 0xb0, 0xec, 0x8a, 0xf6, 0x96, 0x4e, 0xb1, 0x6d, 0x8c, 0x18, 0x8e, 0x35, 0xb1, 0xdb, 0x8d, 0x8a, 0x86, 0x65, 0xb2, 0x4a, 0x8f, 0x29, 0x7e, 0x94, 0xb3, 0x28, 0x91, 0x33, 
0x76, 0xe4, 0xb3, 0xe0, 0x93, 0x2e, 0x6f, 0x61, 0xb5, 0x07, 0x95, 0x3a, 0x67, 0xcb, 0xb6, 0x1a, 0x97, 0x27, 0x60, 0x21, 0xb7, 0x92, 0x9a, 0x03, 0x57, 0x84, 0xaa, 0x64, 0x84, 0x42, 0xdf, 0x1c, 0xad, 0x81, 0x83, 0xda, 0xd5, 0xed, 0xb0, 0x67, 0x83, 0x62, 0xcd, 0x3f, 0xb3, 0x13, 0x82, 0xeb, 0xc4, 0xd2, 0xb5, 0x71, 0x82, 0x82, 0xbc, 0x55, 0xb7, 0x67, 0x82, 0x45, 0xb3, 0xcf, 0xb8, 0xc1, 0x82, 0x6b, 0xab, 0x81, 0xb9, 0xae, 0x82, 0xd6, 0xa3, 0x57, 0xba, 0x52, 0x83, 0xa7, 0x9b, 0x33, 0xba, 0xc2, 0x84, 0xbb, 0x93, 0x10, 0xbb, 0x52, 0x85, 0xe2, 0x8b, 0x2c, 0xbb, 0xdf, 0x87, 0x1c, 0x83, 0x6e, 0xbc, 0xab, 0x88, 0xfc, 0x7b, 0xa9, 0xbd, 0xac, 0x8b, 0x29, 0x73, 0xea, 0xbe, 0xfa, 0x8d, 0x51, 0x6c, 0x2f, 0xc0, 0x94, 0x8f, 0x84, 0x64, 0x67, 0xc1, 0xea, 0x92, 0x23, 0x5c, 0x4a, 0xb4, 0x00, 0x7d, 0x0f, 0xe3, 0x23, 0xb7, 0x12, 0x7c, 0xa4, 0xd9, 0xe0, 0xb9, 0xda, 0x7c, 0x49, 0xd1, 0x49, 0xbc, 0x6c, 0x7b, 0xc2, 0xc8, 0xca, 0xbe, 0xda, 0x7b, 0x2b, 0xc0, 0x52, 0xc0, 0x7e, 0x7b, 0x22, 0xb8, 0x0c, 0xc1, 0xf1, 0x7b, 0x28, 0xaf, 0xdc, 0xc2, 0xc0, 0x7b, 0xae, 0xa7, 0xf8, 0xc3, 0x83, 0x7c, 0x2e, 0xa0, 0x12, 0xc3, 0xe5, 0x7d, 0x5c, 0x98, 0x05, 0xc4, 0x35, 0x7e, 0x7f, 0x8f, 0xeb, 0xc4, 0xfd, 0x7f, 0x9c, 0x88, 0x32, 0xc5, 0xbc, 0x80, 0x80, 0x80, 0x80, 0xc6, 0xd5, 0x83, 0x22, 0x78, 0xc5, 0xc7, 0xdd, 0x85, 0x74, 0x71, 0x14, 0xc9, 0x55, 0x88, 0x39, 0x69, 0x40, 0xcb, 0x01, 0x8b, 0x3e, 0x60, 0x6a, 0xbd, 0xd6, 0x74, 0xed, 0xe5, 0xe7, 0xc0, 0xc3, 0x74, 0xa8, 0xdc, 0xbb, 0xc3, 0x3f, 0x74, 0x76, 0xd4, 0x6e, 0xc5, 0x87, 0x74, 0x35, 0xcc, 0x3b, 0xc7, 0x91, 0x73, 0xee, 0xc4, 0x18, 0xc9, 0x29, 0x73, 0xdd, 0xbc, 0x19, 0xca, 0x52, 0x73, 0xf8, 0xb4, 0x34, 0xcb, 0x4b, 0x74, 0x58, 0xac, 0x69, 0xcc, 0x15, 0x75, 0x02, 0xa4, 0xb7, 0xcc, 0xbd, 0x75, 0xeb, 0x9c, 0xf9, 0xcd, 0x36, 0x77, 0x30, 0x95, 0x23, 0xcd, 0xc1, 0x78, 0x82, 0x8d, 0x4c, 0xce, 0x7d, 0x79, 0xe1, 0x85, 0x89, 0xcf, 0x60, 0x7b, 0x7d, 0x7d, 0xba, 0xd0, 0xb5, 0x7d, 0xd6, 0x75, 0xd8, 0xd2, 0x1d, 0x80, 0x4f, 0x6d, 0xeb, 0xd3, 0xa5, 0x83, 0xda, 0x65, 0x27, 0xc7, 
0x4d, 0x6d, 0x43, 0xe7, 0xa8, 0xca, 0x15, 0x6d, 0x18, 0xde, 0x86, 0xcc, 0x4a, 0x6d, 0x07, 0xd6, 0xc1, 0xce, 0x5e, 0x6c, 0xfa, 0xcf, 0x22, 0xd0, 0x21, 0x6c, 0xe4, 0xc7, 0x74, 0xd1, 0xb9, 0x6c, 0xcc, 0xbf, 0xdb, 0xd2, 0x9a, 0x6d, 0x0f, 0xb8, 0x54, 0xd3, 0x74, 0x6d, 0x4e, 0xb0, 0xc9, 0xd4, 0x43, 0x6e, 0x07, 0xa9, 0x4c, 0xd5, 0x0b, 0x6e, 0xc0, 0xa1, 0xc7, 0xd5, 0xb0, 0x6f, 0xe5, 0x9a, 0x21, 0xd6, 0x3e, 0x71, 0x3b, 0x92, 0x83, 0xd6, 0xe9, 0x72, 0xce, 0x8a, 0xba, 0xd7, 0xa8, 0x74, 0x59, 0x82, 0xdc, 0xd8, 0xee, 0x76, 0xb7, 0x7a, 0xb9, 0xda, 0x7a, 0x79, 0x61, 0x72, 0x91, 0xdc, 0x22, 0x7c, 0x62, 0x69, 0xe2, 0xd1, 0x1f, 0x65, 0xb4, 0xe7, 0xe7, 0xd3, 0x92, 0x65, 0xb1, 0xdf, 0xb9, 0xd5, 0x9a, 0x65, 0xaf, 0xd8, 0xc5, 0xd7, 0x98, 0x65, 0xa1, 0xd1, 0xe4, 0xd9, 0x05, 0x65, 0xac, 0xca, 0xcb, 0xda, 0x25, 0x65, 0xcc, 0xc3, 0x96, 0xdb, 0x0e, 0x66, 0x23, 0xbc, 0x68, 0xdb, 0xcf, 0x66, 0xa4, 0xb5, 0x3e, 0xdc, 0x84, 0x67, 0x46, 0xae, 0x0a, 0xdd, 0x2a, 0x68, 0x46, 0xa6, 0xbe, 0xdd, 0xcd, 0x69, 0x3f, 0x9f, 0x63, 0xde, 0x64, 0x6a, 0x97, 0x97, 0xdf, 0xde, 0xf5, 0x6b, 0xe2, 0x90, 0x58, 0xdf, 0x9f, 0x6d, 0x93, 0x88, 0x3d, 0xe0, 0x54, 0x6f, 0x11, 0x80, 0x22, 0xe2, 0x53, 0x72, 0x59, 0x77, 0x77, 0xe4, 0x5b, 0x75, 0x52, 0x6e, 0xe1, 0xdb, 0xc8, 0x5d, 0xd2, 0xe8, 0x56, 0xde, 0x47, 0x5d, 0xbb, 0xe1, 0x52, 0xe0, 0xe2, 0x5d, 0x46, 0xdb, 0x08, 0xe2, 0x6d, 0x5d, 0x7d, 0xd4, 0xbd, 0xe4, 0x13, 0x5d, 0x8f, 0xce, 0x75, 0xe4, 0x30, 0x5e, 0x6f, 0xc7, 0x8b, 0xe4, 0x51, 0x5f, 0x34, 0xc0, 0x89, 0xe4, 0xef, 0x5f, 0xd2, 0xb9, 0xba, 0xe5, 0x8a, 0x60, 0x7b, 0xb2, 0xe2, 0xe6, 0x13, 0x61, 0x84, 0xab, 0xe5, 0xe6, 0x96, 0x62, 0xaf, 0xa4, 0xc8, 0xe7, 0x17, 0x63, 0xe9, 0x9d, 0x7b, 0xe7, 0xa5, 0x65, 0x44, 0x95, 0xf7, 0xe8, 0x48, 0x66, 0xb2, 0x8e, 0x45, 0xe9, 0x31, 0x68, 0x84, 0x86, 0x07, 0xea, 0x42, 0x6a, 0xc9, 0x7d, 0x7a, 0xec, 0x06, 0x6e, 0x1a, 0x74, 0x28, 0x62, 0xa2, 0xcb, 0xfc, 0xba, 0xff, 0x63, 0x5f, 0xcd, 0x42, 0xb3, 0x2f, 0x62, 0x7d, 0xd0, 0x0b, 0xab, 0x16, 0x62, 0x69, 0xd1, 0xe7, 0xa3, 0x56, 0x62, 0x6a, 0xd3, 0x23, 
0x9b, 0x40, 0x62, 0x37, 0xd4, 0x14, 0x92, 0xb4, 0x61, 0xee, 0xd4, 0xed, 0x8a, 0x73, 0x61, 0xcb, 0xd5, 0x85, 0x82, 0x59, 0x61, 0xc2, 0xd6, 0x11, 0x7a, 0xe5, 0x61, 0x74, 0xd6, 0xbc, 0x73, 0xa2, 0x61, 0x2e, 0xd7, 0x5c, 0x6c, 0xea, 0x60, 0xcc, 0xd7, 0xfc, 0x66, 0xa8, 0x5f, 0xe3, 0xd8, 0xce, 0x60, 0x50, 0x5f, 0x75, 0xd8, 0xa7, 0x59, 0x92, 0x5f, 0xed, 0xd8, 0x1c, 0x52, 0xda, 0x60, 0x29, 0xd7, 0xa2, 0x4c, 0x61, 0x60, 0xdc, 0xd6, 0xd2, 0x46, 0x44, 0x67, 0xc3, 0xc7, 0x48, 0xbd, 0x6b, 0x69, 0x29, 0xc7, 0xee, 0xb5, 0x5f, 0x6a, 0x36, 0xc8, 0xd2, 0xad, 0x7c, 0x6a, 0xa4, 0xca, 0x3a, 0xa5, 0x63, 0x6a, 0xed, 0xcb, 0xa3, 0x9d, 0x2a, 0x6b, 0x1a, 0xcc, 0xc9, 0x94, 0x6c, 0x6b, 0x1c, 0xce, 0x0c, 0x8b, 0xe0, 0x6a, 0xf9, 0xcf, 0x63, 0x83, 0x8a, 0x6a, 0x9b, 0xd0, 0xa1, 0x7b, 0xed, 0x6a, 0x1f, 0xd1, 0xa9, 0x74, 0xcf, 0x69, 0xb5, 0xd2, 0xa0, 0x6e, 0x09, 0x69, 0x63, 0xd3, 0x8f, 0x67, 0xe7, 0x69, 0x08, 0xd4, 0x67, 0x61, 0xc5, 0x68, 0xc2, 0xd4, 0x7a, 0x5b, 0x04, 0x68, 0x89, 0xd4, 0x42, 0x53, 0xef, 0x68, 0xab, 0xd3, 0xd1, 0x4d, 0x46, 0x69, 0x45, 0xd3, 0x03, 0x47, 0x21, 0x6c, 0x7a, 0xc3, 0x33, 0xbf, 0xf6, 0x6e, 0x49, 0xc3, 0x5f, 0xb7, 0x9d, 0x70, 0x04, 0xc3, 0x83, 0xaf, 0x79, 0x70, 0xfd, 0xc4, 0x6f, 0xa7, 0x2e, 0x71, 0xd2, 0xc5, 0x6c, 0x9e, 0xef, 0x72, 0x63, 0xc6, 0x72, 0x96, 0x4f, 0x72, 0xc6, 0xc7, 0x97, 0x8d, 0xbd, 0x72, 0xd8, 0xc8, 0xeb, 0x85, 0x55, 0x72, 0xbf, 0xca, 0x5e, 0x7d, 0x6f, 0x72, 0x65, 0xcb, 0xe9, 0x76, 0x51, 0x71, 0xf8, 0xcd, 0x6d, 0x6f, 0x45, 0x71, 0xa7, 0xcf, 0x12, 0x69, 0x10, 0x71, 0x41, 0xd0, 0x6c, 0x63, 0x03, 0x70, 0xec, 0xd0, 0xe0, 0x5c, 0x67, 0x70, 0xad, 0xd0, 0xa5, 0x55, 0x08, 0x70, 0xaf, 0xd0, 0x61, 0x4d, 0xf5, 0x71, 0x7f, 0xcf, 0x06, 0x48, 0x05, 0x71, 0x1a, 0xbf, 0x41, 0xc3, 0x05, 0x73, 0x7d, 0xbe, 0xcc, 0xba, 0x0c, 0x75, 0xc1, 0xbe, 0x52, 0xb1, 0x9f, 0x77, 0x35, 0xbe, 0xe3, 0xa9, 0x35, 0x78, 0x69, 0xbf, 0xbc, 0xa0, 0xda, 0x79, 0x44, 0xc0, 0xa5, 0x98, 0x56, 0x7a, 0x03, 0xc1, 0x92, 0x8f, 0xd5, 0x7a, 0x46, 0xc2, 0xd1, 0x87, 0x72, 0x7a, 0x6f, 0xc4, 0x24, 0x7f, 0x3b, 0x7a, 
0x4c, 0xc5, 0xc4, 0x78, 0x3f, 0x7a, 0x07, 0xc7, 0x57, 0x71, 0x3f, 0x79, 0xd4, 0xc8, 0xe7, 0x6a, 0xc9, 0x79, 0x8c, 0xca, 0x79, 0x64, 0x85, 0x79, 0x37, 0xcb, 0xa9, 0x5e, 0x1b, 0x79, 0x2e, 0xcb, 0x41, 0x56, 0xc0, 0x79, 0x34, 0xca, 0xf4, 0x4f, 0x86, 0x79, 0xfe, 0xca, 0x2d, 0x49, 0x5a, 0x76, 0xce, 0xb9, 0xa7, 0xc7, 0xc2, 0x79, 0xa9, 0xb8, 0xf6, 0xbc, 0xec, 0x7b, 0xe4, 0xb8, 0x97, 0xb4, 0x58, 0x7d, 0xc2, 0xb8, 0xa7, 0xab, 0xeb, 0x7f, 0x4e, 0xb9, 0x20, 0xa3, 0x8b, 0x80, 0x96, 0xb9, 0xe6, 0x9b, 0x0b, 0x81, 0x98, 0xba, 0xe1, 0x92, 0x75, 0x82, 0x0d, 0xbc, 0x32, 0x8a, 0x1c, 0x82, 0x44, 0xbd, 0xa0, 0x81, 0xbc, 0x82, 0x26, 0xbf, 0x87, 0x7a, 0x62, 0x82, 0x0d, 0xc1, 0x37, 0x73, 0x75, 0x81, 0xf5, 0xc2, 0xce, 0x6c, 0xc3, 0x81, 0xd3, 0xc4, 0x5d, 0x66, 0x52, 0x81, 0x94, 0xc5, 0xed, 0x60, 0x05, 0x81, 0xb7, 0xc5, 0xcf, 0x58, 0xbf, 0x81, 0xda, 0xc5, 0xc4, 0x51, 0x77, 0x82, 0xcd, 0xc5, 0x88, 0x4a, 0x44, 0x7d, 0x22, 0xb4, 0x06, 0xcc, 0x02, 0x80, 0x71, 0xb3, 0x04, 0xbf, 0xc2, 0x82, 0xa2, 0xb2, 0xcd, 0xb7, 0x62, 0x84, 0xc0, 0xb2, 0xa4, 0xaf, 0x05, 0x86, 0x92, 0xb2, 0xce, 0xa6, 0xa3, 0x88, 0x33, 0xb3, 0x24, 0x9e, 0x38, 0x89, 0x5a, 0xb4, 0x00, 0x95, 0xa1, 0x8a, 0x23, 0xb5, 0x13, 0x8d, 0x39, 0x8a, 0x74, 0xb6, 0x79, 0x85, 0x17, 0x8a, 0x94, 0xb8, 0x18, 0x7d, 0x55, 0x8a, 0x6e, 0xba, 0x1c, 0x76, 0x3f, 0x8a, 0x38, 0xbc, 0x16, 0x6f, 0x3d, 0x8a, 0x1b, 0xbd, 0xee, 0x68, 0x71, 0x89, 0xdd, 0xbf, 0xce, 0x61, 0xd3, 0x8a, 0x15, 0xc0, 0x69, 0x5a, 0xbc, 0x8a, 0x70, 0xc0, 0xa4, 0x53, 0x68, 0x8b, 0x45, 0xc1, 0x01, 0x4c, 0x6a, 0x84, 0x2e, 0xae, 0x5b, 0xcf, 0x1c, 0x87, 0xa1, 0xad, 0x17, 0xc4, 0x11, 0x8a, 0x43, 0xac, 0x6e, 0xbb, 0x1c, 0x8c, 0x87, 0xac, 0x24, 0xb2, 0xab, 0x8e, 0x84, 0xac, 0x16, 0xaa, 0x36, 0x90, 0x51, 0xac, 0x2c, 0xa1, 0xba, 0x91, 0x76, 0xac, 0xec, 0x99, 0x39, 0x92, 0x5b, 0xad, 0xd0, 0x90, 0xa7, 0x92, 0xad, 0xaf, 0x5d, 0x88, 0x7d, 0x92, 0xf0, 0xb0, 0xdb, 0x80, 0x63, 0x92, 0xee, 0xb2, 0xd9, 0x79, 0x43, 0x92, 0xcf, 0xb4, 0xd7, 0x72, 0x3a, 0x92, 0xdb, 0xb6, 0x91, 0x6b, 0x3d, 0x92, 0xe2, 0xb8, 0x35, 
0x64, 0x60, 0x93, 0x11, 0xb9, 0x8b, 0x5d, 0x7c, 0x93, 0xb8, 0xba, 0x34, 0x56, 0x5a, 0x94, 0x77, 0xba, 0xf1, 0x4f, 0x05, 0x8b, 0x8c, 0xa8, 0x19, 0xd2, 0x4c, 0x8f, 0x26, 0xa6, 0xa6, 0xc8, 0x58, 0x92, 0x88, 0xa5, 0x8d, 0xbf, 0x41, 0x94, 0xda, 0xa5, 0x45, 0xb6, 0xaa, 0x96, 0xf4, 0xa5, 0x14, 0xae, 0x22, 0x98, 0xb0, 0xa5, 0x3e, 0xa5, 0x97, 0x9a, 0x0b, 0xa5, 0xac, 0x9d, 0x12, 0x9a, 0xc7, 0xa6, 0x89, 0x94, 0x8c, 0x9b, 0x5d, 0xa7, 0xbc, 0x8c, 0x4c, 0x9b, 0xcc, 0xa9, 0x47, 0x84, 0x57, 0x9b, 0xf0, 0xab, 0x1c, 0x7c, 0xb6, 0x9b, 0xbc, 0xad, 0x53, 0x75, 0x83, 0x9b, 0x90, 0xaf, 0x68, 0x6e, 0x4b, 0x9b, 0xe2, 0xb0, 0xdc, 0x67, 0x28, 0x9c, 0x1e, 0xb2, 0x4e, 0x60, 0x2f, 0x9d, 0x40, 0xb3, 0x6a, 0x59, 0x6e, 0x9e, 0x6e, 0xb4, 0x89, 0x52, 0x24, 0x93, 0x63, 0xa1, 0x69, 0xd6, 0x3a, 0x97, 0x7f, 0x9f, 0xdd, 0xcc, 0x4e, 0x9a, 0xec, 0x9e, 0xe6, 0xc3, 0x7f, 0x9d, 0x72, 0x9e, 0x5c, 0xba, 0xdc, 0x9f, 0x85, 0x9e, 0x04, 0xb2, 0x47, 0xa1, 0x3f, 0x9e, 0x25, 0xa9, 0xbb, 0xa2, 0xbb, 0x9e, 0x6b, 0xa1, 0x24, 0xa3, 0x79, 0x9f, 0x33, 0x98, 0xa0, 0xa4, 0x17, 0xa0, 0x0c, 0x90, 0x20, 0xa4, 0x92, 0xa1, 0xa1, 0x88, 0x67, 0xa4, 0xf5, 0xa3, 0x27, 0x80, 0x99, 0xa5, 0x02, 0xa5, 0x36, 0x79, 0x6c, 0xa4, 0xee, 0xa7, 0x57, 0x72, 0x54, 0xa5, 0x46, 0xa9, 0x09, 0x6a, 0xf0, 0xa5, 0xbd, 0xaa, 0x81, 0x63, 0x80, 0xa6, 0x91, 0xac, 0x19, 0x5c, 0x69, 0xa7, 0xc9, 0xad, 0xcb, 0x55, 0x04, 0x9d, 0xbc, 0x99, 0xc7, 0xda, 0x46, 0xa1, 0x72, 0x98, 0x85, 0xcf, 0xfe, 0xa4, 0x3e, 0x97, 0xe9, 0xc7, 0xa3, 0xa6, 0xdb, 0x97, 0x56, 0xbf, 0x4b, 0xa8, 0xf1, 0x97, 0x10, 0xb6, 0xcb, 0xaa, 0xbf, 0x96, 0xef, 0xae, 0x4b, 0xab, 0xec, 0x97, 0x3e, 0xa5, 0xa8, 0xac, 0xd2, 0x97, 0xc9, 0x9d, 0x28, 0xad, 0x63, 0x98, 0xbc, 0x94, 0xf0, 0xad, 0xd1, 0x99, 0xec, 0x8c, 0xe3, 0xad, 0xff, 0x9b, 0x74, 0x85, 0x0f, 0xae, 0x23, 0x9d, 0x26, 0x7d, 0x68, 0xae, 0x33, 0x9f, 0x1d, 0x76, 0x2c, 0xae, 0x50, 0xa1, 0x09, 0x6f, 0x00, 0xaf, 0x3a, 0xa2, 0xa8, 0x67, 0x5e, 0xaf, 0xf8, 0xa4, 0x42, 0x5f, 0xe1, 0xb1, 0x5a, 0xa6, 0x6a, 0x58, 0x30, 0xa7, 0xbd, 0x92, 0x7a, 0xde, 0x0b, 0xaa, 
0xd1, 0x91, 0x9f, 0xd4, 0xb0, 0xad, 0x90, 0x90, 0xe3, 0xcb, 0xf3, 0xb0, 0x2a, 0x90, 0x41, 0xc3, 0x9e, 0xb2, 0x50, 0x8f, 0xf2, 0xbb, 0x35, 0xb4, 0x1b, 0x8f, 0xc8, 0xb2, 0xbf, 0xb5, 0x46, 0x90, 0x04, 0xaa, 0x3e, 0xb6, 0x29, 0x90, 0x71, 0xa1, 0xb6, 0xb6, 0xa6, 0x91, 0x63, 0x99, 0x81, 0xb7, 0x08, 0x92, 0x6a, 0x91, 0x58, 0xb7, 0x4d, 0x93, 0xcd, 0x89, 0x8d, 0xb7, 0x7a, 0x95, 0x42, 0x81, 0xd2, 0xb7, 0xf4, 0x97, 0x15, 0x7a, 0x4f, 0xb8, 0x48, 0x98, 0xf7, 0x72, 0xf8, 0xb8, 0xf8, 0x9a, 0xd4, 0x6b, 0x74, 0xb9, 0xf8, 0x9c, 0x99, 0x63, 0xba, 0xbb, 0x25, 0x9e, 0xab, 0x5b, 0xa4, 0xb1, 0x9e, 0x8b, 0x65, 0xe2, 0x8a, 0xb4, 0x98, 0x8a, 0xcb, 0xd9, 0x3f, 0xb7, 0x46, 0x8a, 0x40, 0xd0, 0x77, 0xb9, 0xb3, 0x89, 0xc9, 0xc8, 0x0b, 0xbb, 0xf3, 0x89, 0x56, 0xbf, 0x9f, 0xbd, 0x89, 0x89, 0x2c, 0xb7, 0x37, 0xbe, 0xeb, 0x89, 0x1c, 0xae, 0xdb, 0xbf, 0x9e, 0x89, 0x86, 0xa6, 0x98, 0xc0, 0x31, 0x8a, 0x12, 0x9e, 0x5b, 0xc0, 0x71, 0x8b, 0x1c, 0x96, 0x35, 0xc0, 0xb1, 0x8c, 0x2f, 0x8e, 0x28, 0xc0, 0xea, 0x8d, 0x71, 0x86, 0x68, 0xc1, 0x2c, 0x8e, 0xda, 0x7e, 0xa7, 0xc2, 0x21, 0x90, 0xcc, 0x77, 0x12, 0xc2, 0xe0, 0x92, 0xb9, 0x6f, 0xa4, 0xc4, 0x29, 0x95, 0x03, 0x68, 0x08, 0xc5, 0x67, 0x97, 0x3b, 0x60, 0x1d, 0xba, 0xef, 0x84, 0x83, 0xe6, 0xa9, 0xbd, 0xe3, 0x84, 0x21, 0xdd, 0x4c, 0xc0, 0x69, 0x83, 0xba, 0xd4, 0xc8, 0xc2, 0xaa, 0x83, 0x42, 0xcc, 0x50, 0xc4, 0xbb, 0x82, 0xb4, 0xc3, 0xd5, 0xc6, 0x5d, 0x82, 0x6c, 0xbb, 0x78, 0xc7, 0xa7, 0x82, 0x54, 0xb3, 0x39, 0xc8, 0x77, 0x82, 0x92, 0xab, 0x25, 0xc8, 0xff, 0x83, 0x05, 0xa3, 0x25, 0xc9, 0x5e, 0x83, 0xd8, 0x9b, 0x19, 0xc9, 0x9e, 0x84, 0xe1, 0x93, 0x03, 0xca, 0x10, 0x85, 0xf3, 0x8b, 0x32, 0xca, 0x8b, 0x87, 0x10, 0x83, 0x88, 0xcb, 0x4b, 0x88, 0xca, 0x7b, 0xe2, 0xcc, 0x42, 0x8a, 0xdf, 0x74, 0x4a, 0xcd, 0x6a, 0x8d, 0x29, 0x6c, 0x99, 0xcf, 0x00, 0x8f, 0xe1, 0x64, 0x62, 0xc3, 0xd8, 0x7d, 0x69, 0xe9, 0xc4, 0xc6, 0x8f, 0x7d, 0x17, 0xe0, 0xdb, 0xc8, 0xfe, 0x7c, 0xa2, 0xd8, 0x67, 0xcb, 0x59, 0x7c, 0x24, 0xd0, 0x02, 0xcd, 0x51, 0x7b, 0xb1, 0xc7, 0xa9, 0xcf, 0x1d, 0x7b, 0x55, 
0xbf, 0x64, 0xd0, 0x2a, 0x7b, 0x47, 0xb7, 0x6b, 0xd1, 0x15, 0x7b, 0x4a, 0xaf, 0x7a, 0xd1, 0x95, 0x7b, 0xe3, 0xa7, 0xae, 0xd2, 0x0c, 0x7c, 0x77, 0x9f, 0xde, 0xd2, 0x62, 0x7d, 0x92, 0x97, 0xf6, 0xd2, 0xaf, 0x7e, 0x9a, 0x90, 0x05, 0xd3, 0x6b, 0x7f, 0xa7, 0x88, 0x44, 0xd4, 0x2e, 0x80, 0x80, 0x80, 0x80, 0xd5, 0x5b, 0x83, 0x24, 0x78, 0xce, 0xd6, 0x6d, 0x85, 0x7e, 0x71, 0x17, 0xd7, 0xcb, 0x88, 0x7d, 0x68, 0xca, 0xcd, 0x2a, 0x75, 0x9e, 0xec, 0x1c, 0xcf, 0xe7, 0x75, 0x3f, 0xe2, 0xe6, 0xd1, 0xfd, 0x74, 0xf8, 0xda, 0xe1, 0xd3, 0xf3, 0x74, 0xb4, 0xd3, 0x00, 0xd5, 0xa6, 0x74, 0x77, 0xcb, 0x1b, 0xd7, 0x20, 0x74, 0x40, 0xc3, 0x30, 0xd8, 0x30, 0x74, 0x38, 0xbb, 0x6a, 0xd8, 0xfe, 0x74, 0x52, 0xb3, 0xb8, 0xd9, 0xb8, 0x74, 0xbd, 0xac, 0x18, 0xda, 0x59, 0x75, 0x6c, 0xa4, 0x84, 0xda, 0xe7, 0x76, 0x50, 0x9c, 0xec, 0xdb, 0x60, 0x77, 0x78, 0x95, 0x50, 0xdb, 0xee, 0x78, 0xa3, 0x8d, 0x9a, 0xdc, 0xb9, 0x79, 0xea, 0x85, 0xab, 0xdd, 0xc9, 0x7b, 0x8b, 0x7d, 0xa2, 0xdf, 0x68, 0x7e, 0x0c, 0x75, 0x8b, 0xe1, 0x26, 0x80, 0xac, 0x6d, 0x44, 0xd6, 0x6b, 0x6d, 0xf7, 0xec, 0xf3, 0xd8, 0xc4, 0x6d, 0xb5, 0xe4, 0xa5, 0xda, 0xd4, 0x6d, 0x84, 0xdd, 0x12, 0xdc, 0xb3, 0x6d, 0x5c, 0xd5, 0xbb, 0xde, 0x66, 0x6d, 0x33, 0xce, 0x69, 0xdf, 0x80, 0x6d, 0x1f, 0xc6, 0xd0, 0xe0, 0x7f, 0x6d, 0x1d, 0xbf, 0x47, 0xe1, 0x33, 0x6d, 0x69, 0xb7, 0xf3, 0xe1, 0xe7, 0x6d, 0xb0, 0xb0, 0xa0, 0xe2, 0x8b, 0x6e, 0x77, 0xa9, 0x3a, 0xe3, 0x2f, 0x6f, 0x38, 0xa1, 0xc7, 0xe3, 0xc4, 0x70, 0x5d, 0x9a, 0x40, 0xe4, 0x4e, 0x71, 0xa9, 0x92, 0xbe, 0xe5, 0x0a, 0x73, 0x25, 0x8a, 0xe5, 0xe5, 0xee, 0x74, 0xaa, 0x82, 0xdd, 0xe7, 0x6d, 0x77, 0x0e, 0x7a, 0x90, 0xe9, 0x32, 0x79, 0xcb, 0x72, 0x14, 0xe0, 0x96, 0x66, 0x0f, 0xed, 0x63, 0xe2, 0xf2, 0x65, 0xd9, 0xe6, 0x00, 0xe5, 0x3b, 0x65, 0x8a, 0xdf, 0x02, 0xe6, 0xc5, 0x65, 0x96, 0xd8, 0x48, 0xe8, 0x3f, 0x65, 0xa9, 0xd1, 0x9a, 0xe8, 0xfc, 0x66, 0x0c, 0xca, 0x9a, 0xe9, 0x9d, 0x66, 0x6b, 0xc3, 0x89, 0xea, 0x51, 0x66, 0xce, 0xbc, 0x8f, 0xea, 0xfc, 0x67, 0x46, 0xb5, 0x9d, 0xeb, 0x7c, 0x67, 0xee, 0xae, 0x88, 0xec, 
0x10, 0x68, 0xd4, 0xa7, 0x51, 0xec, 0x81, 0x69, 0xc6, 0x9f, 0xf4, 0xed, 0x0b, 0x6a, 0xfe, 0x98, 0x58, 0xed, 0x9a, 0x6c, 0x33, 0x90, 0xb2, 0xee, 0xa1, 0x6e, 0x04, 0x88, 0x70, 0xef, 0xe6, 0x6f, 0xee, 0x80, 0x23, 0xf2, 0x7e, 0x72, 0xa0, 0x77, 0x09, 0x6a, 0xd0, 0xcf, 0x73, 0xbd, 0xff, 0x6b, 0x8f, 0xd0, 0xba, 0xb6, 0xae, 0x6b, 0xb0, 0xd2, 0x7c, 0xaf, 0x76, 0x6a, 0x9b, 0xd5, 0x09, 0xa7, 0x53, 0x6a, 0xc9, 0xd6, 0x5f, 0x9f, 0xd0, 0x6a, 0x74, 0xd7, 0x6c, 0x97, 0x5e, 0x6a, 0x14, 0xd8, 0x64, 0x8f, 0x10, 0x69, 0xab, 0xd9, 0x15, 0x87, 0x3a, 0x69, 0x6d, 0xd9, 0x93, 0x7f, 0x80, 0x68, 0xb9, 0xda, 0x51, 0x78, 0x88, 0x68, 0x5d, 0xda, 0xc4, 0x71, 0x81, 0x67, 0xc3, 0xdb, 0x5d, 0x6b, 0x42, 0x66, 0xaa, 0xdc, 0x26, 0x65, 0x0e, 0x66, 0x93, 0xdc, 0x46, 0x5e, 0xf2, 0x65, 0xed, 0xdc, 0x1e, 0x58, 0x55, 0x66, 0x16, 0xdb, 0xa1, 0x51, 0xc1, 0x66, 0x05, 0xdb, 0x39, 0x4b, 0x29, 0x6f, 0xd1, 0xcb, 0x08, 0xc0, 0x55, 0x71, 0x23, 0xcb, 0xbe, 0xb8, 0xbb, 0x72, 0x4c, 0xcc, 0x98, 0xb1, 0x6e, 0x72, 0xd9, 0xcd, 0xd2, 0xa9, 0x8e, 0x73, 0x1a, 0xcf, 0x3c, 0xa1, 0x98, 0x73, 0x2b, 0xd0, 0x87, 0x99, 0x10, 0x73, 0x0e, 0xd1, 0xbc, 0x90, 0x75, 0x72, 0xb5, 0xd2, 0xe6, 0x88, 0x68, 0x72, 0x44, 0xd3, 0xfa, 0x80, 0x74, 0x71, 0xa1, 0xd4, 0xf5, 0x79, 0x93, 0x70, 0xfa, 0xd5, 0xd6, 0x72, 0xaf, 0x70, 0x70, 0xd6, 0xb2, 0x6c, 0x66, 0x6f, 0xf0, 0xd7, 0x7d, 0x66, 0x68, 0x6f, 0x6a, 0xd8, 0x37, 0x60, 0x6a, 0x6f, 0x07, 0xd8, 0x07, 0x59, 0xa9, 0x6e, 0xa8, 0xd7, 0xca, 0x52, 0xc8, 0x6e, 0xad, 0xd7, 0x4f, 0x4c, 0x1e, 0x74, 0xd1, 0xc6, 0xf0, 0xc2, 0xe9, 0x76, 0x96, 0xc7, 0x22, 0xba, 0xd2, 0x78, 0x18, 0xc7, 0x8a, 0xb3, 0x34, 0x79, 0x1c, 0xc8, 0x52, 0xab, 0x53, 0x79, 0xac, 0xc9, 0x68, 0xa3, 0x3f, 0x7a, 0x11, 0xca, 0x8d, 0x9a, 0xe9, 0x7a, 0x5a, 0xcb, 0xc4, 0x92, 0x5a, 0x7a, 0x59, 0xcd, 0x20, 0x89, 0xf7, 0x7a, 0x33, 0xce, 0x8f, 0x81, 0x92, 0x79, 0xb5, 0xd0, 0x03, 0x7a, 0x7a, 0x79, 0x16, 0xd1, 0x26, 0x73, 0xbf, 0x78, 0x89, 0xd2, 0x3b, 0x6d, 0x67, 0x78, 0x10, 0xd3, 0x42, 0x67, 0x7b, 0x77, 0x90, 0xd4, 0x35, 0x61, 0x8f, 0x77, 0x20, 0xd4, 0x31, 
0x5a, 0xe3, 0x76, 0xc0, 0xd3, 0xec, 0x53, 0xd0, 0x76, 0xb6, 0xd3, 0x5e, 0x4c, 0xf8, 0x79, 0xe6, 0xc2, 0xd5, 0xc5, 0xec, 0x7b, 0xd7, 0xc2, 0xbd, 0xbd, 0x19, 0x7d, 0x9c, 0xc2, 0xcd, 0xb5, 0x3c, 0x7f, 0x1c, 0xc3, 0x1f, 0xad, 0x52, 0x80, 0x18, 0xc3, 0xf3, 0xa5, 0x27, 0x80, 0xe7, 0xc4, 0xe4, 0x9c, 0xf2, 0x81, 0x78, 0xc5, 0xef, 0x94, 0x7f, 0x81, 0xc1, 0xc7, 0x20, 0x8c, 0x26, 0x81, 0xc8, 0xc8, 0x71, 0x83, 0xd6, 0x81, 0x90, 0xc9, 0xe4, 0x7c, 0x5e, 0x81, 0x1f, 0xcb, 0x65, 0x75, 0x89, 0x80, 0x99, 0xcc, 0xe2, 0x6e, 0xd5, 0x80, 0x1b, 0xce, 0x67, 0x68, 0xb4, 0x7f, 0x89, 0xcf, 0xd7, 0x62, 0xb5, 0x7f, 0x18, 0xd0, 0x1c, 0x5c, 0x37, 0x7e, 0xc7, 0xcf, 0x98, 0x55, 0x13, 0x7e, 0xc6, 0xce, 0xf4, 0x4d, 0xf2, 0x7f, 0x30, 0xbe, 0x6c, 0xca, 0x3f, 0x81, 0x8f, 0xbe, 0x00, 0xbf, 0xaa, 0x83, 0xaa, 0xbd, 0xdf, 0xb7, 0xc1, 0x85, 0x9e, 0xbd, 0xd0, 0xaf, 0xda, 0x86, 0xd8, 0xbe, 0xa3, 0xa7, 0x92, 0x87, 0xe8, 0xbf, 0x8d, 0x9f, 0x52, 0x88, 0x8a, 0xc0, 0x79, 0x96, 0xd2, 0x89, 0x11, 0xc1, 0x64, 0x8e, 0x76, 0x89, 0x40, 0xc2, 0x84, 0x86, 0x58, 0x89, 0x49, 0xc3, 0xca, 0x7e, 0x8d, 0x89, 0x09, 0xc5, 0x63, 0x77, 0xd0, 0x88, 0xac, 0xc6, 0xed, 0x71, 0x11, 0x88, 0x54, 0xc8, 0x66, 0x6a, 0xb5, 0x87, 0xe7, 0xc9, 0xdb, 0x64, 0x7c, 0x87, 0x89, 0xca, 0xeb, 0x5e, 0x21, 0x87, 0x9b, 0xca, 0xaa, 0x57, 0x11, 0x87, 0xb8, 0xca, 0x76, 0x4f, 0x6f, 0x86, 0x1e, 0xb8, 0xef, 0xce, 0xa8, 0x88, 0xda, 0xb8, 0x42, 0xc3, 0x78, 0x8a, 0xe8, 0xb8, 0x02, 0xba, 0xfe, 0x8c, 0xc4, 0xb7, 0xfb, 0xb2, 0xee, 0x8e, 0x49, 0xb8, 0x51, 0xaa, 0xb6, 0x8f, 0x88, 0xb8, 0xe6, 0xa2, 0x66, 0x90, 0x58, 0xb9, 0xbf, 0x99, 0xed, 0x90, 0xef, 0xba, 0xb2, 0x91, 0x63, 0x91, 0x1d, 0xbb, 0xee, 0x89, 0x3a, 0x91, 0x31, 0xbd, 0x25, 0x81, 0x12, 0x90, 0xe2, 0xbf, 0x11, 0x7a, 0x00, 0x90, 0x97, 0xc0, 0xe0, 0x73, 0x3d, 0x90, 0x61, 0xc2, 0x64, 0x6c, 0xb8, 0x90, 0x20, 0xc3, 0xce, 0x66, 0x5b, 0x8f, 0xc5, 0xc5, 0x3c, 0x60, 0x13, 0x90, 0x13, 0xc5, 0x60, 0x58, 0xfd, 0x90, 0x61, 0xc5, 0x8e, 0x51, 0xb2, 0x8c, 0xe3, 0xb3, 0xef, 0xd1, 0xa7, 0x8f, 0xaf, 0xb2, 0xeb, 0xc7, 0x6d, 0x92, 
0x2b, 0xb2, 0x3e, 0xbe, 0x95, 0x94, 0x28, 0xb2, 0x40, 0xb6, 0x79, 0x95, 0xfb, 0xb2, 0x4e, 0xae, 0x59, 0x97, 0x6e, 0xb2, 0x9f, 0xa5, 0xf3, 0x98, 0x8f, 0xb3, 0x1d, 0x9d, 0x83, 0x99, 0x12, 0xb3, 0xf4, 0x94, 0xf2, 0x99, 0x6d, 0xb5, 0x05, 0x8c, 0xa7, 0x99, 0x96, 0xb6, 0x55, 0x84, 0xaf, 0x99, 0x84, 0xb7, 0xe8, 0x7d, 0x27, 0x99, 0x2a, 0xb9, 0xdb, 0x76, 0x3f, 0x98, 0xc1, 0xbb, 0xc1, 0x6f, 0x5d, 0x98, 0x9c, 0xbd, 0x50, 0x68, 0xae, 0x98, 0x5a, 0xbe, 0xe9, 0x62, 0x1d, 0x98, 0x93, 0xbf, 0xbc, 0x5b, 0x29, 0x99, 0x11, 0xc0, 0x2c, 0x53, 0xe5, 0x94, 0x00, 0xae, 0xb7, 0xd5, 0x1b, 0x97, 0x5f, 0xad, 0x24, 0xcb, 0x6e, 0x9a, 0x23, 0xac, 0x38, 0xc2, 0xbd, 0x9c, 0x48, 0xab, 0xde, 0xba, 0x67, 0x9e, 0x1c, 0xab, 0xb3, 0xb2, 0x2d, 0x9f, 0xa2, 0xab, 0xd9, 0xa9, 0xbe, 0xa1, 0x04, 0xac, 0x29, 0xa1, 0x3f, 0xa1, 0x6e, 0xac, 0xf3, 0x98, 0xa1, 0xa1, 0xa4, 0xad, 0xd1, 0x8f, 0xf9, 0xa1, 0xde, 0xaf, 0x44, 0x87, 0xfe, 0xa2, 0x0f, 0xb0, 0xa1, 0x80, 0x09, 0xa1, 0xe4, 0xb2, 0xa2, 0x79, 0x3a, 0xa1, 0x95, 0xb4, 0x97, 0x72, 0x66, 0xa1, 0x8c, 0xb6, 0x23, 0x6b, 0x8d, 0xa1, 0x9c, 0xb7, 0x82, 0x64, 0xc4, 0xa1, 0xdc, 0xb8, 0xc6, 0x5e, 0x05, 0xa2, 0xb8, 0xb9, 0xb9, 0x56, 0xed, 0x9c, 0xfa, 0xa7, 0x89, 0xd9, 0xb3, 0xa0, 0x5d, 0xa6, 0x11, 0xcf, 0x03, 0xa3, 0x02, 0xa5, 0x91, 0xc6, 0xc8, 0xa5, 0x54, 0xa5, 0x25, 0xbe, 0x94, 0xa6, 0xfb, 0xa5, 0x02, 0xb6, 0x40, 0xa8, 0x6d, 0xa4, 0xfb, 0xad, 0xdf, 0xa9, 0x83, 0xa5, 0x43, 0xa5, 0x3d, 0xaa, 0x4d, 0xa5, 0xc7, 0x9c, 0xb5, 0xaa, 0xb8, 0xa6, 0xa5, 0x94, 0x51, 0xaa, 0xe9, 0xa7, 0xcc, 0x8c, 0x2a, 0xaa, 0xe4, 0xa9, 0x32, 0x84, 0x3a, 0xaa, 0xcd, 0xaa, 0xe3, 0x7c, 0xb6, 0xaa, 0x9f, 0xac, 0xfa, 0x75, 0xba, 0xaa, 0x6d, 0xae, 0xff, 0x6e, 0xbc, 0xaa, 0xb5, 0xb0, 0x5c, 0x67, 0x9a, 0xaa, 0xf5, 0xb1, 0xa8, 0x60, 0xaa, 0xab, 0xf1, 0xb3, 0x04, 0x59, 0x9d, 0xa6, 0xdc, 0xa0, 0x60, 0xdd, 0x82, 0xa9, 0xed, 0x9f, 0x63, 0xd3, 0x82, 0xac, 0x69, 0x9e, 0xc7, 0xca, 0xf4, 0xae, 0x9a, 0x9e, 0x49, 0xc2, 0xc2, 0xb0, 0x3c, 0x9e, 0x0b, 0xba, 0x76, 0xb1, 0x8d, 0x9d, 0xf4, 0xb2, 0x1c, 0xb2, 0x67, 0x9e, 0x28, 
0xa9, 0x71, 0xb3, 0x0d, 0x9e, 0x76, 0xa0, 0xa9, 0xb3, 0x7b, 0x9f, 0x5a, 0x98, 0x71, 0xb3, 0xde, 0xa0, 0x41, 0x90, 0x3d, 0xb3, 0xe9, 0xa1, 0xc1, 0x88, 0x83, 0xb3, 0xd5, 0xa3, 0x30, 0x80, 0xb9, 0xb3, 0xde, 0xa5, 0x08, 0x79, 0x9d, 0xb3, 0xcf, 0xa6, 0xf0, 0x72, 0xa1, 0xb4, 0x28, 0xa8, 0xa3, 0x6b, 0x67, 0xb4, 0xac, 0xaa, 0x22, 0x64, 0x0f, 0xb5, 0x60, 0xab, 0xc9, 0x5c, 0x93, 0xb0, 0xfd, 0x99, 0x17, 0xe1, 0x79, 0xb3, 0xac, 0x98, 0x7a, 0xd8, 0x26, 0xb6, 0x0b, 0x97, 0xed, 0xcf, 0x78, 0xb8, 0x08, 0x97, 0x8a, 0xc7, 0x3d, 0xb9, 0xd2, 0x97, 0x3a, 0xbe, 0xf7, 0xbb, 0x1a, 0x97, 0x0f, 0xb6, 0x91, 0xbc, 0x2e, 0x97, 0x08, 0xae, 0x23, 0xbc, 0xb3, 0x97, 0x83, 0xa5, 0x91, 0xbd, 0x0f, 0x98, 0x2b, 0x9d, 0x1e, 0xbd, 0x2e, 0x99, 0x1c, 0x94, 0xdd, 0xbd, 0x39, 0x9a, 0x3f, 0x8c, 0xd7, 0xbd, 0x2c, 0x9b, 0xad, 0x85, 0x2b, 0xbd, 0x23, 0x9d, 0x3c, 0x7d, 0xa2, 0xbd, 0x11, 0x9e, 0xf7, 0x76, 0x69, 0xbd, 0x03, 0xa0, 0xad, 0x6f, 0x3e, 0xbe, 0x26, 0xa2, 0x57, 0x67, 0xa2, 0xbf, 0x19, 0xa3, 0xfd, 0x5f, 0xf3, 0xba, 0x14, 0x92, 0x46, 0xe6, 0x2e, 0xbc, 0xeb, 0x91, 0x9d, 0xdc, 0x96, 0xbf, 0x28, 0x91, 0x1a, 0xd3, 0xf5, 0xc1, 0x1f, 0x90, 0xba, 0xcb, 0x83, 0xc2, 0xdd, 0x90, 0x70, 0xc3, 0x30, 0xc4, 0x31, 0x90, 0x3f, 0xba, 0xe0, 0xc5, 0x3d, 0x90, 0x25, 0xb2, 0x8b, 0xc5, 0xcf, 0x90, 0x6c, 0xaa, 0x2d, 0xc6, 0x2a, 0x90, 0xda, 0xa1, 0xc9, 0xc6, 0x5f, 0x91, 0xbf, 0x99, 0xa0, 0xc6, 0x7f, 0x92, 0xb7, 0x91, 0x78, 0xc6, 0x94, 0x93, 0xf7, 0x89, 0xbc, 0xc6, 0x94, 0x95, 0x4a, 0x82, 0x13, 0xc6, 0xf7, 0x97, 0x06, 0x7a, 0xa2, 0xc7, 0x40, 0x98, 0xd3, 0x73, 0x53, 0xc7, 0xf9, 0x9a, 0xc8, 0x6b, 0xd2, 0xc9, 0x35, 0x9c, 0xd2, 0x64, 0x06, 0xc2, 0xfd, 0x8b, 0x85, 0xea, 0x48, 0xc5, 0xc9, 0x8a, 0xfe, 0xe0, 0xa8, 0xc7, 0xe5, 0x8a, 0x95, 0xd8, 0x37, 0xc9, 0xef, 0x8a, 0x2d, 0xcf, 0xdd, 0xcb, 0xaa, 0x89, 0xd0, 0xc7, 0x87, 0xcd, 0x41, 0x89, 0x80, 0xbf, 0x35, 0xce, 0x2b, 0x89, 0x75, 0xb6, 0xf3, 0xce, 0xee, 0x89, 0x81, 0xae, 0xb7, 0xcf, 0x28, 0x89, 0xf2, 0xa6, 0x95, 0xcf, 0x57, 0x8a, 0x7b, 0x9e, 0x70, 0xcf, 0x6f, 0x8b, 0x6a, 0x96, 0x4f, 0xcf, 
0x8b, 0x8c, 0x5d, 0x8e, 0x46, 0xcf, 0xc5, 0x8d, 0x6f, 0x86, 0xa3, 0xd0, 0x05, 0x8e, 0x9f, 0x7f, 0x02, 0xd0, 0xdf, 0x90, 0x91, 0x77, 0x83, 0xd1, 0x86, 0x92, 0x8a, 0x70, 0x20, 0xd2, 0xeb, 0x95, 0x44, 0x68, 0x44, 0xcb, 0x68, 0x84, 0xdd, 0xed, 0x8b, 0xcd, 0xf0, 0x84, 0x77, 0xe4, 0x7d, 0xd0, 0x17, 0x84, 0x0e, 0xdc, 0x39, 0xd1, 0xf5, 0x83, 0x9b, 0xd3, 0xc6, 0xd3, 0x9b, 0x83, 0x38, 0xcb, 0x61, 0xd5, 0x0d, 0x82, 0xe4, 0xc3, 0x0c, 0xd6, 0x1a, 0x82, 0xbd, 0xba, 0xe3, 0xd6, 0xef, 0x82, 0xae, 0xb2, 0xd0, 0xd7, 0x74, 0x82, 0xfe, 0xaa, 0xe4, 0xd7, 0xcb, 0x83, 0x78, 0xa3, 0x08, 0xd8, 0x21, 0x84, 0x39, 0x9b, 0x32, 0xd8, 0x6e, 0x85, 0x21, 0x93, 0x5b, 0xd8, 0xdd, 0x86, 0x29, 0x8b, 0x87, 0xd9, 0x50, 0x87, 0x50, 0x83, 0xb3, 0xda, 0x07, 0x89, 0x18, 0x7b, 0xea, 0xda, 0xee, 0x8b, 0x40, 0x74, 0x2d, 0xdb, 0xff, 0x8d, 0x8b, 0x6c, 0x3d, 0xd2, 0xc2, 0x7e, 0x26, 0xf0, 0xf0, 0xd5, 0x63, 0x7d, 0xce, 0xe7, 0xb3, 0xd7, 0x7b, 0x7d, 0x62, 0xdf, 0x96, 0xd9, 0x7a, 0x7c, 0xd4, 0xd7, 0x1d, 0xdb, 0x5c, 0x7c, 0x50, 0xce, 0xcc, 0xdc, 0xbb, 0x7c, 0x00, 0xc6, 0xb8, 0xdd, 0xf5, 0x7b, 0xbf, 0xbe, 0xb8, 0xde, 0xbc, 0x7b, 0xc1, 0xb6, 0xe3, 0xdf, 0x75, 0x7b, 0xd6, 0xaf, 0x15, 0xdf, 0xe8, 0x7c, 0x64, 0xa7, 0x6a, 0xe0, 0x56, 0x7c, 0xf0, 0x9f, 0xbe, 0xe0, 0xc7, 0x7d, 0xd8, 0x98, 0x27, 0xe1, 0x31, 0x7e, 0xab, 0x90, 0x89, 0xe2, 0x0c, 0x7f, 0xb1, 0x88, 0x86, 0xe2, 0xe1, 0x80, 0x80, 0x80, 0x80, 0xe4, 0x46, 0x83, 0x7c, 0x78, 0xa4, 0xe5, 0x91, 0x86, 0x11, 0x70, 0xa6, 0xdb, 0xb6, 0x76, 0xaa, 0xf1, 0xf2, 0xde, 0x0e, 0x76, 0x4b, 0xe9, 0x87, 0xe0, 0x1d, 0x75, 0xde, 0xe1, 0x9e, 0xe1, 0xf0, 0x75, 0x94, 0xd9, 0xc7, 0xe3, 0x9d, 0x75, 0x5a, 0xd2, 0x09, 0xe4, 0xde, 0x75, 0x28, 0xca, 0x57, 0xe5, 0xfb, 0x74, 0xf3, 0xc2, 0xb3, 0xe6, 0xd9, 0x74, 0xf9, 0xbb, 0x31, 0xe7, 0x97, 0x75, 0x1e, 0xb3, 0xbd, 0xe8, 0x3c, 0x75, 0x8a, 0xac, 0x3f, 0xe8, 0xc4, 0x76, 0x32, 0xa4, 0xb0, 0xe9, 0x43, 0x77, 0x04, 0x9d, 0x15, 0xe9, 0xba, 0x78, 0x0f, 0x95, 0x70, 0xea, 0x51, 0x79, 0x21, 0x8d, 0xa9, 0xeb, 0x47, 0x7a, 0x51, 0x85, 0xa7, 0xec, 0x7e, 0x7b, 0xd5, 
0x7d, 0x8f, 0xee, 0x51, 0x7e, 0x7a, 0x75, 0x06, 0xe5, 0x58, 0x6e, 0xca, 0xf2, 0xfa, 0xe7, 0x7d, 0x6e, 0x7e, 0xeb, 0x12, 0xe9, 0x85, 0x6e, 0x2c, 0xe3, 0x82, 0xeb, 0x28, 0x6e, 0x14, 0xdc, 0x2c, 0xec, 0x86, 0x6e, 0x1f, 0xd5, 0x09, 0xed, 0xb7, 0x6e, 0x25, 0xcd, 0xe5, 0xee, 0xad, 0x6e, 0x07, 0xc6, 0xb4, 0xef, 0x97, 0x6d, 0xf3, 0xbf, 0x94, 0xf0, 0x49, 0x6e, 0x35, 0xb8, 0x81, 0xf0, 0xf0, 0x6e, 0x7c, 0xb1, 0x66, 0xf1, 0x7f, 0x6f, 0x20, 0xaa, 0x01, 0xf1, 0xff, 0x6f, 0xdc, 0xa2, 0x79, 0xf2, 0x85, 0x71, 0x03, 0x9a, 0xcf, 0xf3, 0x11, 0x72, 0x36, 0x93, 0x1a, 0xf3, 0xf7, 0x73, 0xaf, 0x8b, 0x19, 0xf5, 0x2a, 0x75, 0x07, 0x82, 0xf4, 0xf9, 0x73, 0x77, 0x19, 0x79, 0x69, 0x73, 0x33, 0xd2, 0xf4, 0xc1, 0x26, 0x74, 0x02, 0xd4, 0x02, 0xb9, 0xe6, 0x74, 0x94, 0xd5, 0x4f, 0xb2, 0xe6, 0x74, 0x6f, 0xd6, 0xfe, 0xab, 0x77, 0x74, 0x32, 0xd8, 0x88, 0xa3, 0xe1, 0x73, 0xab, 0xda, 0x0a, 0x9c, 0x0b, 0x73, 0x08, 0xdb, 0x44, 0x93, 0xd6, 0x72, 0x48, 0xdc, 0x44, 0x8b, 0xfc, 0x71, 0x76, 0xdd, 0x0c, 0x84, 0x61, 0x70, 0xad, 0xdd, 0xac, 0x7d, 0x38, 0x6f, 0xf2, 0xde, 0x30, 0x76, 0x79, 0x6f, 0x3e, 0xde, 0xa7, 0x6f, 0xc3, 0x6e, 0x8e, 0xdf, 0x12, 0x69, 0xc5, 0x6d, 0xdf, 0xdf, 0x6e, 0x63, 0xc3, 0x6d, 0x44, 0xdf, 0x87, 0x5d, 0x9b, 0x6c, 0xc9, 0xdf, 0x43, 0x57, 0x3c, 0x6c, 0x4c, 0xde, 0xff, 0x50, 0xb8, 0x78, 0x99, 0xce, 0x7c, 0xc3, 0x0f, 0x79, 0xf8, 0xcf, 0x01, 0xbb, 0x84, 0x7a, 0xed, 0xd0, 0x00, 0xb4, 0x7c, 0x7b, 0x70, 0xd1, 0x2f, 0xad, 0x47, 0x7b, 0x4b, 0xd2, 0x98, 0xa5, 0x78, 0x7b, 0x13, 0xd4, 0x09, 0x9d, 0x99, 0x7a, 0xdf, 0xd5, 0x54, 0x95, 0x4f, 0x7a, 0x73, 0xd6, 0x90, 0x8d, 0x3b, 0x79, 0xd4, 0xd7, 0xa3, 0x85, 0x76, 0x79, 0x1d, 0xd8, 0x9a, 0x7e, 0x1c, 0x78, 0x60, 0xd9, 0x6f, 0x77, 0x75, 0x77, 0xa0, 0xda, 0x2e, 0x70, 0xc2, 0x77, 0x01, 0xda, 0xd8, 0x6a, 0xcd, 0x76, 0x62, 0xdb, 0x6f, 0x64, 0xe4, 0x75, 0xc4, 0xdb, 0xd4, 0x5e, 0xdf, 0x75, 0x3e, 0xdb, 0x81, 0x58, 0x52, 0x74, 0xb5, 0xdb, 0x2e, 0x51, 0xa3, 0x7d, 0xb3, 0xca, 0x69, 0xc5, 0xb2, 0x7f, 0x5c, 0xca, 0x76, 0xbd, 0x99, 0x80, 0xa2, 0xcb, 0x25, 0xb6, 0x5c, 0x81, 
0xc1, 0xcb, 0xfd, 0xaf, 0x33, 0x81, 0xfe, 0xcd, 0x2f, 0xa7, 0x42, 0x82, 0x0b, 0xce, 0x77, 0x9f, 0x5e, 0x82, 0x29, 0xcf, 0xd7, 0x96, 0xf3, 0x82, 0x18, 0xd1, 0x2d, 0x8e, 0xb3, 0x81, 0xb8, 0xd2, 0x66, 0x86, 0xa9, 0x81, 0x32, 0xd3, 0x8d, 0x7e, 0xf8, 0x80, 0x7d, 0xd4, 0x99, 0x78, 0x69, 0x7f, 0xc0, 0xd5, 0x93, 0x71, 0xcd, 0x7f, 0x23, 0xd6, 0x7e, 0x6b, 0xce, 0x7e, 0x8c, 0xd7, 0x52, 0x65, 0xf5, 0x7d, 0xf0, 0xd8, 0x13, 0x60, 0x1c, 0x7d, 0x5e, 0xd7, 0xb0, 0x59, 0x73, 0x7c, 0xd2, 0xd7, 0x46, 0x52, 0x9f, 0x83, 0x3e, 0xc6, 0x73, 0xc8, 0xb6, 0x85, 0x2e, 0xc6, 0x4f, 0xc0, 0x24, 0x86, 0xa7, 0xc6, 0xc5, 0xb8, 0xb0, 0x87, 0xff, 0xc7, 0x49, 0xb1, 0x51, 0x88, 0xc1, 0xc8, 0x3e, 0xa9, 0x7c, 0x89, 0x43, 0xc9, 0x59, 0xa1, 0xa6, 0x89, 0x84, 0xca, 0x80, 0x99, 0x5f, 0x89, 0x9d, 0xcb, 0xb9, 0x90, 0xfd, 0x89, 0x68, 0xcd, 0x07, 0x88, 0xdb, 0x89, 0x15, 0xce, 0x66, 0x80, 0xb0, 0x88, 0x76, 0xcf, 0xd0, 0x79, 0xec, 0x87, 0xc6, 0xd0, 0xf9, 0x73, 0x5d, 0x87, 0x22, 0xd2, 0x0e, 0x6d, 0x28, 0x86, 0x8f, 0xd3, 0x16, 0x67, 0x42, 0x85, 0xfa, 0xd4, 0x0d, 0x61, 0x5e, 0x85, 0x8e, 0xd3, 0xfd, 0x5a, 0xd4, 0x85, 0x2f, 0xd3, 0x90, 0x53, 0xe8, 0x89, 0x5b, 0xc2, 0x6e, 0xcb, 0xd2, 0x8b, 0x27, 0xc2, 0x3d, 0xc3, 0x24, 0x8c, 0xc0, 0xc2, 0x54, 0xbb, 0x30, 0x8e, 0x2e, 0xc2, 0x9a, 0xb3, 0x8a, 0x8f, 0x4a, 0xc3, 0x38, 0xab, 0xb8, 0x90, 0x19, 0xc4, 0x25, 0xa3, 0xcb, 0x90, 0xa0, 0xc5, 0x20, 0x9b, 0xb1, 0x90, 0xdf, 0xc6, 0x1e, 0x93, 0x56, 0x90, 0xe0, 0xc7, 0x34, 0x8b, 0x40, 0x90, 0xb6, 0xc8, 0x5a, 0x83, 0x3f, 0x90, 0x56, 0xc9, 0xc3, 0x7c, 0x1e, 0x8f, 0xcb, 0xcb, 0x3b, 0x75, 0x7f, 0x8f, 0x2d, 0xcc, 0xa2, 0x6e, 0xf6, 0x8e, 0x8b, 0xce, 0x07, 0x68, 0xd0, 0x8d, 0xd3, 0xcf, 0x63, 0x62, 0xbc, 0x8d, 0x87, 0xcf, 0xec, 0x5c, 0x54, 0x8d, 0xa3, 0xcf, 0xbb, 0x55, 0x2b, 0x8f, 0xf0, 0xbd, 0xde, 0xcf, 0x18, 0x91, 0xeb, 0xbd, 0x92, 0xc6, 0x70, 0x93, 0xce, 0xbd, 0x79, 0xbe, 0x44, 0x95, 0x5c, 0xbd, 0xcb, 0xb6, 0x7e, 0x96, 0xbc, 0xbe, 0x35, 0xae, 0xb8, 0x97, 0x81, 0xbf, 0x17, 0xa6, 0x8e, 0x98, 0x12, 0xbf, 0xff, 0x9e, 0x63, 0x98, 0x5e, 0xc0, 0xb7, 
0x96, 0x11, 0x98, 0x84, 0xc1, 0x85, 0x8d, 0xec, 0x98, 0x69, 0xc2, 0x80, 0x86, 0x1c, 0x98, 0x33, 0xc3, 0xa4, 0x7e, 0x99, 0x97, 0xcd, 0xc5, 0x2c, 0x78, 0x04, 0x97, 0x53, 0xc6, 0xa5, 0x71, 0x69, 0x96, 0xf4, 0xc7, 0xfa, 0x6b, 0x13, 0x96, 0x87, 0xc9, 0x4c, 0x64, 0xd3, 0x96, 0x21, 0xca, 0x79, 0x5e, 0x7d, 0x96, 0x3d, 0xca, 0xe5, 0x57, 0x63, 0x96, 0xfd, 0xb9, 0x26, 0xd2, 0xc3, 0x99, 0x60, 0xb8, 0x88, 0xca, 0x22, 0x9b, 0x75, 0xb8, 0x31, 0xc2, 0x00, 0x9d, 0x21, 0xb8, 0x40, 0xba, 0x1a, 0x9e, 0x9a, 0xb8, 0x6e, 0xb2, 0x4a, 0x9f, 0xa4, 0xb8, 0xe7, 0xaa, 0x26, 0xa0, 0x71, 0xb9, 0x80, 0xa1, 0xee, 0xa0, 0xac, 0xba, 0x3c, 0x99, 0x8a, 0xa0, 0xbb, 0xba, 0xfe, 0x91, 0x18, 0xa0, 0x97, 0xbc, 0x21, 0x89, 0x2d, 0xa0, 0x57, 0xbd, 0x3b, 0x81, 0x3b, 0x9f, 0xc2, 0xbf, 0x03, 0x7a, 0x43, 0x9f, 0x34, 0xc0, 0xb2, 0x73, 0x94, 0x9e, 0xee, 0xc2, 0x0d, 0x6d, 0x25, 0x9e, 0xce, 0xc3, 0x43, 0x66, 0xd7, 0x9e, 0x9a, 0xc4, 0x84, 0x60, 0x96, 0x9e, 0xd5, 0xc5, 0x50, 0x59, 0x82, 0x9e, 0x95, 0xb3, 0xc6, 0xd6, 0x8a, 0xa0, 0xe2, 0xb3, 0x00, 0xcd, 0xe6, 0xa3, 0x16, 0xb2, 0xb4, 0xc5, 0xfa, 0xa5, 0x09, 0xb2, 0x7c, 0xbe, 0x1c, 0xa6, 0x88, 0xb2, 0x8b, 0xb6, 0x2d, 0xa7, 0xd1, 0xb2, 0xab, 0xae, 0x28, 0xa8, 0xb3, 0xb3, 0x0a, 0xa5, 0xc4, 0xa9, 0x3f, 0xb3, 0x8c, 0x9d, 0x5c, 0xa9, 0x3c, 0xb4, 0x5a, 0x94, 0xe6, 0xa9, 0x25, 0xb5, 0x54, 0x8c, 0xc1, 0xa8, 0xf0, 0xb6, 0x82, 0x84, 0xf9, 0xa8, 0xac, 0xb7, 0xf5, 0x7d, 0x9e, 0xa8, 0x42, 0xb9, 0xd5, 0x76, 0xdd, 0xa7, 0xb1, 0xbb, 0xad, 0x70, 0x0c, 0xa7, 0x98, 0xbc, 0xf8, 0x69, 0x5c, 0xa7, 0x6a, 0xbe, 0x4a, 0x62, 0xc2, 0xa7, 0x86, 0xbf, 0x4a, 0x5b, 0xc5, 0xa7, 0xd2, 0xad, 0xce, 0xdb, 0x1e, 0xaa, 0x4c, 0xad, 0x2a, 0xd2, 0x1c, 0xac, 0x52, 0xac, 0xbe, 0xca, 0x15, 0xae, 0x25, 0xac, 0x59, 0xc2, 0x3e, 0xaf, 0x77, 0xac, 0x3b, 0xba, 0x3e, 0xb0, 0x8a, 0xac, 0x3e, 0xb2, 0x30, 0xb1, 0x47, 0xac, 0x7d, 0xa9, 0xbc, 0xb1, 0xda, 0xac, 0xd1, 0xa1, 0x25, 0xb1, 0xed, 0xad, 0x94, 0x98, 0xa2, 0xb1, 0xdb, 0xae, 0x63, 0x90, 0x13, 0xb1, 0x5c, 0xaf, 0x8d, 0x88, 0x05, 0xb0, 0xe8, 0xb0, 0xa9, 0x80, 0x13, 0xb0, 
0xe1, 0xb2, 0x92, 0x79, 0xa1, 0xb0, 0xb0, 0xb4, 0x71, 0x73, 0x16, 0xb0, 0xa4, 0xb5, 0xf3, 0x6c, 0x54, 0xb0, 0xc2, 0xb7, 0x2c, 0x65, 0x84, 0xb0, 0xe6, 0xb8, 0x65, 0x5e, 0x8f, 0xb1, 0xc4, 0xa6, 0xb7, 0xdf, 0xa8, 0xb4, 0x14, 0xa6, 0x53, 0xd6, 0xaf, 0xb6, 0x03, 0xa5, 0xf3, 0xce, 0x79, 0xb7, 0x87, 0xa5, 0xaa, 0xc6, 0x8a, 0xb8, 0xd7, 0xa5, 0x76, 0xbe, 0x89, 0xb9, 0xbb, 0xa5, 0x72, 0xb6, 0x4a, 0xba, 0x75, 0xa5, 0x8c, 0xad, 0xf2, 0xba, 0xc8, 0xa6, 0x08, 0xa5, 0x4f, 0xba, 0xf3, 0xa6, 0xa7, 0x9c, 0xca, 0xba, 0xe7, 0xa7, 0x77, 0x94, 0x6c, 0xba, 0xc2, 0xa8, 0x8a, 0x8c, 0x61, 0xba, 0x78, 0xa9, 0xda, 0x84, 0xa8, 0xba, 0x2d, 0xab, 0x55, 0x7d, 0x3a, 0xb9, 0xe7, 0xad, 0x21, 0x76, 0x50, 0xb9, 0x95, 0xae, 0xf1, 0x6f, 0x6d, 0xb9, 0xd2, 0xb0, 0x4b, 0x68, 0x41, 0xba, 0x00, 0xb1, 0x94, 0x61, 0x25, 0xbb, 0x8b, 0x9f, 0xf7, 0xe4, 0x7b, 0xbd, 0xba, 0x9f, 0x71, 0xdb, 0xaa, 0xbf, 0x88, 0x9f, 0x00, 0xd3, 0x7a, 0xc0, 0xe4, 0x9e, 0xc2, 0xcb, 0x47, 0xc1, 0xfd, 0x9e, 0xa2, 0xc3, 0x09, 0xc2, 0xdd, 0x9e, 0x84, 0xba, 0xb1, 0xc3, 0x96, 0x9e, 0x68, 0xb2, 0x47, 0xc3, 0xdc, 0x9e, 0xcc, 0xa9, 0xbd, 0xc3, 0xf7, 0x9f, 0x5b, 0xa1, 0x21, 0xc3, 0xcc, 0xa0, 0x25, 0x98, 0xb8, 0xc3, 0xa2, 0xa0, 0xfb, 0x90, 0x5a, 0xc3, 0x88, 0xa2, 0x6d, 0x88, 0xd6, 0xc3, 0x58, 0xa3, 0xd5, 0x81, 0x47, 0xc3, 0x32, 0xa5, 0x6b, 0x7a, 0x28, 0xc2, 0xfd, 0xa7, 0x0b, 0x73, 0x26, 0xc3, 0x45, 0xa8, 0xa5, 0x6b, 0xea, 0xc3, 0xd2, 0xaa, 0x29, 0x64, 0x58, 0xc4, 0x88, 0x99, 0x4e, 0xe8, 0xc6, 0xc6, 0xc7, 0x98, 0xc7, 0xdf, 0xe6, 0xc8, 0x59, 0x98, 0x7a, 0xd7, 0xc0, 0xc9, 0xda, 0x98, 0x33, 0xcf, 0xa8, 0xcb, 0x1c, 0x98, 0x04, 0xc7, 0x75, 0xcc, 0x47, 0x97, 0xd9, 0xbf, 0x44, 0xcc, 0xd5, 0x97, 0xdf, 0xb6, 0xdc, 0xcd, 0x44, 0x97, 0xfa, 0xae, 0x75, 0xcd, 0x45, 0x98, 0x66, 0xa6, 0x0a, 0xcd, 0x3d, 0x98, 0xf4, 0x9d, 0xac, 0xcd, 0x22, 0x99, 0xd1, 0x95, 0x71, 0xcc, 0xfe, 0x9a, 0xd7, 0x8d, 0x67, 0xcc, 0xc2, 0x9c, 0x31, 0x85, 0xb9, 0xcc, 0x83, 0x9d, 0xa8, 0x7e, 0x21, 0xcc, 0x54, 0x9f, 0x4a, 0x76, 0xe1, 0xcc, 0x1c, 0xa0, 0xdf, 0x6f, 0xb5, 0xcd, 0x81, 0xa2, 0xc0, 
0x68, 0x02, 0xcc, 0x9f, 0x92, 0xad, 0xec, 0xb7, 0xce, 0xbb, 0x92, 0x15, 0xe3, 0xe7, 0xd0, 0x75, 0x91, 0xa5, 0xdb, 0xb5, 0xd1, 0xf2, 0x91, 0x5e, 0xd3, 0x96, 0xd3, 0x41, 0x91, 0x21, 0xcb, 0x68, 0xd4, 0x68, 0x90, 0xf0, 0xc3, 0x2a, 0xd5, 0x1f, 0x90, 0xf2, 0xba, 0xdc, 0xd5, 0x96, 0x91, 0x14, 0xb2, 0x84, 0xd5, 0xcc, 0x91, 0x62, 0xaa, 0x52, 0xd5, 0xe1, 0x91, 0xc1, 0xa2, 0x2c, 0xd5, 0xec, 0x92, 0x7d, 0x9a, 0x16, 0xd5, 0xeb, 0x93, 0x51, 0x92, 0x00, 0xd5, 0xed, 0x94, 0x7b, 0x8a, 0x3a, 0xd5, 0xdf, 0x95, 0xc2, 0x82, 0x87, 0xd6, 0x35, 0x97, 0x70, 0x7b, 0x08, 0xd6, 0x88, 0x99, 0x34, 0x73, 0xa9, 0xd7, 0x3d, 0x9b, 0x33, 0x6c, 0x01, 0xd4, 0x3e, 0x8c, 0x2f, 0xf0, 0x64, 0xd6, 0x46, 0x8b, 0xb9, 0xe7, 0x98, 0xd7, 0xfb, 0x8b, 0x52, 0xdf, 0x77, 0xd9, 0x85, 0x8a, 0xf5, 0xd7, 0x40, 0xda, 0xfe, 0x8a, 0x99, 0xcf, 0x10, 0xdc, 0x1a, 0x8a, 0x63, 0xc6, 0xdf, 0xdd, 0x16, 0x8a, 0x3a, 0xbe, 0xb4, 0xdd, 0x9a, 0x8a, 0x51, 0xb6, 0x8b, 0xde, 0x0f, 0x8a, 0x78, 0xae, 0x74, 0xde, 0x50, 0x8a, 0xdd, 0xa6, 0x98, 0xde, 0x8e, 0x8b, 0x51, 0x9e, 0xbe, 0xde, 0xc0, 0x8c, 0x19, 0x96, 0xe7, 0xde, 0xe8, 0x8c, 0xeb, 0x8f, 0x05, 0xde, 0xed, 0x8e, 0x29, 0x87, 0x07, 0xde, 0xe5, 0x8f, 0x82, 0x7f, 0x0e, 0xdf, 0xbb, 0x91, 0x7d, 0x77, 0x6c, 0xe0, 0x82, 0x93, 0x6e, 0x6f, 0xb6, 0xda, 0xf7, 0x85, 0xe1, 0xf3, 0xba, 0xdd, 0x16, 0x85, 0x8a, 0xeb, 0x45, 0xde, 0xc9, 0x85, 0x10, 0xe3, 0x33, 0xe0, 0x66, 0x84, 0x93, 0xda, 0xf2, 0xe1, 0xe7, 0x84, 0x29, 0xd2, 0x92, 0xe3, 0x18, 0x83, 0xe8, 0xca, 0x6c, 0xe4, 0x25, 0x83, 0xb8, 0xc2, 0x68, 0xe4, 0xec, 0x83, 0xb1, 0xba, 0x75, 0xe5, 0x99, 0x83, 0xb8, 0xb2, 0x8b, 0xe6, 0x13, 0x84, 0x03, 0xaa, 0xc3, 0xe6, 0x74, 0x84, 0x69, 0xa3, 0x04, 0xe6, 0xd8, 0x85, 0x0e, 0x9b, 0x4e, 0xe7, 0x3d, 0x85, 0xd8, 0x93, 0x99, 0xe7, 0xc8, 0x86, 0xcd, 0x8b, 0xc5, 0xe8, 0x66, 0x87, 0xef, 0x83, 0xd9, 0xe9, 0x6d, 0x89, 0xcd, 0x7b, 0xea, 0xea, 0xe9, 0x8c, 0x20, 0x73, 0xce, 0xe1, 0xf4, 0x7f, 0xb4, 0xf7, 0x2c, 0xe3, 0x4d, 0x7f, 0x93, 0xee, 0xeb, 0xe5, 0x3f, 0x7e, 0xf2, 0xe6, 0xae, 0xe6, 0xf8, 0x7e, 0x71, 0xde, 0x80, 0xe8, 
0xaf, 0x7d, 0xf5, 0xd6, 0x0c, 0xea, 0x2c, 0x7d, 0x93, 0xcd, 0xde, 0xeb, 0x4c, 0x7d, 0x56, 0xc6, 0x1b, 0xec, 0x5a, 0x7d, 0x25, 0xbe, 0x69, 0xed, 0x2a, 0x7d, 0x24, 0xb6, 0xc9, 0xed, 0xef, 0x7d, 0x2d, 0xaf, 0x2d, 0xee, 0x57, 0x7d, 0x99, 0xa7, 0x7c, 0xee, 0xbd, 0x7d, 0xff, 0x9f, 0xc7, 0xef, 0x21, 0x7e, 0xbd, 0x98, 0x02, 0xef, 0x7f, 0x7f, 0x68, 0x90, 0x39, 0xf0, 0x7d, 0x80, 0x1a, 0x88, 0x5d, 0xf1, 0x70, 0x80, 0x80, 0x80, 0x80, 0xf5, 0xd4, 0x83, 0xf5, 0x77, 0xa1, 0xeb, 0x5e, 0x77, 0xab, 0xf8, 0x75, 0xec, 0xd8, 0x77, 0xad, 0xf0, 0x4e, 0xee, 0x7d, 0x77, 0x4b, 0xe8, 0x6c, 0xf0, 0x07, 0x76, 0xee, 0xe0, 0x96, 0xf1, 0x64, 0x76, 0xbf, 0xd8, 0xe5, 0xf2, 0xaa, 0x76, 0x99, 0xd1, 0x47, 0xf3, 0xbf, 0x76, 0x67, 0xc9, 0xf1, 0xf4, 0xce, 0x76, 0x3d, 0xc2, 0xb9, 0xf5, 0xae, 0x76, 0x44, 0xbb, 0x77, 0xf6, 0x6e, 0x76, 0x64, 0xb4, 0x26, 0xf7, 0x13, 0x76, 0xc8, 0xac, 0xb6, 0xf7, 0xa2, 0x77, 0x72, 0xa5, 0x21, 0xf8, 0x32, 0x78, 0x2c, 0x9d, 0x7b, 0xf8, 0xc1, 0x78, 0xfb, 0x95, 0xc0, 0xf9, 0x69, 0x79, 0xbe, 0x8d, 0xe4, 0xfa, 0x6b, 0x7a, 0x5e, 0x85, 0xc8, 0xfe, 0x21, 0x7b, 0xb4, 0x7c, 0x51, 0x7b, 0xa9, 0xd7, 0x96, 0xc5, 0x82, 0x7c, 0xcd, 0xd7, 0x4a, 0xbc, 0xf6, 0x7d, 0x5a, 0xd8, 0x5c, 0xb6, 0x02, 0x7d, 0xb7, 0xd9, 0xb2, 0xaf, 0x33, 0x7d, 0x3f, 0xdb, 0x77, 0xa7, 0xb4, 0x7c, 0x73, 0xdd, 0x49, 0xa0, 0x55, 0x7b, 0xc6, 0xde, 0xb2, 0x98, 0x68, 0x7a, 0xe1, 0xdf, 0xfd, 0x90, 0x85, 0x79, 0xe2, 0xe1, 0x3c, 0x89, 0x2d, 0x78, 0xc5, 0xe2, 0x28, 0x81, 0xb5, 0x77, 0xea, 0xe2, 0xad, 0x7b, 0x0b, 0x77, 0x15, 0xe2, 0xf2, 0x74, 0x7a, 0x76, 0x4c, 0xe3, 0x32, 0x6e, 0x32, 0x75, 0xa4, 0xe3, 0x80, 0x68, 0x64, 0x75, 0x02, 0xe3, 0xd6, 0x62, 0x92, 0x74, 0x45, 0xe3, 0xb4, 0x5c, 0x76, 0x73, 0x92, 0xe3, 0x3c, 0x56, 0x0f, 0x81, 0xaf, 0xd2, 0xb2, 0xc6, 0x8c, 0x83, 0x36, 0xd2, 0x9d, 0xbe, 0x8a, 0x83, 0xed, 0xd3, 0xb7, 0xb7, 0xaf, 0x84, 0x6f, 0xd4, 0xd5, 0xb0, 0xeb, 0x84, 0x43, 0xd6, 0x3c, 0xa9, 0x7d, 0x83, 0xdd, 0xd7, 0xce, 0xa2, 0x0e, 0x83, 0x76, 0xd9, 0x33, 0x9a, 0x29, 0x82, 0xee, 0xda, 0x78, 0x92, 0x1a, 0x82, 0x1e, 0xdb, 0x8a, 
0x8a, 0x6f, 0x81, 0x2e, 0xdc, 0x82, 0x82, 0xec, 0x80, 0x3f, 0xdd, 0x4a, 0x7c, 0x12, 0x7f, 0x54, 0xdd, 0xfe, 0x75, 0x80, 0x7e, 0x76, 0xde, 0xa2, 0x6f, 0x14, 0x7d, 0xc4, 0xdf, 0x1f, 0x69, 0x49, 0x7d, 0x25, 0xdf, 0x9b, 0x63, 0x84, 0x7c, 0x66, 0xdf, 0xae, 0x5d, 0x7f, 0x7b, 0xe7, 0xdf, 0x65, 0x57, 0x45, 0x87, 0x61, 0xce, 0x95, 0xc8, 0x8d, 0x89, 0x4e, 0xce, 0x81, 0xc0, 0xc6, 0x8a, 0x4a, 0xcf, 0x61, 0xb9, 0xb3, 0x8b, 0x0e, 0xd0, 0x65, 0xb2, 0xc4, 0x8b, 0x55, 0xd1, 0x96, 0xab, 0x80, 0x8b, 0x33, 0xd2, 0xf0, 0xa4, 0x04, 0x8a, 0xf8, 0xd4, 0x4e, 0x9c, 0x42, 0x8a, 0x9e, 0xd5, 0x8f, 0x94, 0x0d, 0x8a, 0x0e, 0xd6, 0xbc, 0x8c, 0x24, 0x89, 0x44, 0xd7, 0xcc, 0x84, 0x6f, 0x88, 0x67, 0xd8, 0xc7, 0x7d, 0x55, 0x87, 0x82, 0xd9, 0xa9, 0x76, 0xd8, 0x86, 0x9c, 0xda, 0x7c, 0x70, 0x57, 0x85, 0xf2, 0xdb, 0x40, 0x6a, 0x7d, 0x85, 0x53, 0xdb, 0xf8, 0x64, 0xac, 0x84, 0xbd, 0xdc, 0x74, 0x5e, 0xcd, 0x84, 0x90, 0xdc, 0x56, 0x58, 0xb2, 0x8d, 0x56, 0xca, 0xaa, 0xcb, 0x53, 0x8f, 0x25, 0xca, 0x94, 0xc3, 0x7d, 0x90, 0x96, 0xcb, 0x20, 0xbc, 0x1c, 0x91, 0x83, 0xcc, 0x00, 0xb4, 0xf3, 0x92, 0x53, 0xcd, 0x0a, 0xad, 0xca, 0x92, 0x70, 0xce, 0x22, 0xa6, 0x29, 0x92, 0x78, 0xcf, 0x51, 0x9e, 0x88, 0x92, 0x31, 0xd0, 0x8d, 0x96, 0x46, 0x91, 0xc1, 0xd1, 0xbb, 0x8e, 0x26, 0x91, 0x20, 0xd2, 0xd8, 0x86, 0x56, 0x90, 0x6b, 0xd3, 0xf5, 0x7e, 0xd4, 0x8f, 0x8f, 0xd4, 0xfd, 0x78, 0x5d, 0x8e, 0xae, 0xd5, 0xf8, 0x71, 0xe4, 0x8d, 0xfe, 0xd6, 0xfe, 0x6b, 0xe0, 0x8d, 0x67, 0xd7, 0xfe, 0x66, 0x03, 0x8c, 0xc4, 0xd8, 0xec, 0x60, 0x29, 0x8c, 0xe9, 0xd8, 0xfb, 0x5a, 0x08, 0x93, 0xc0, 0xc7, 0x0c, 0xce, 0x33, 0x95, 0x9a, 0xc7, 0x34, 0xc6, 0x55, 0x97, 0x43, 0xc7, 0x7f, 0xbe, 0xbf, 0x98, 0x5e, 0xc8, 0x25, 0xb7, 0x7b, 0x99, 0x7e, 0xc8, 0xd3, 0xb0, 0x60, 0x99, 0xc6, 0xc9, 0x94, 0xa8, 0xb0, 0x99, 0xe5, 0xca, 0x4e, 0xa1, 0x0c, 0x99, 0xcd, 0xcb, 0x47, 0x99, 0x0c, 0x99, 0xba, 0xcc, 0x6f, 0x91, 0x19, 0x99, 0x1c, 0xcd, 0x7d, 0x89, 0x58, 0x98, 0x8b, 0xce, 0xaa, 0x81, 0xa2, 0x97, 0xc6, 0xcf, 0xe8, 0x7a, 0xce, 0x96, 0xec, 0xd0, 0xf8, 0x74, 0x25, 0x96, 
0x44, 0xd2, 0x1c, 0x6d, 0xd6, 0x95, 0xbe, 0xd3, 0x4a, 0x67, 0xda, 0x95, 0x28, 0xd4, 0x6d, 0x61, 0xe1, 0x95, 0x90, 0xd5, 0x3c, 0x5b, 0xad, 0x9a, 0x7a, 0xc3, 0x88, 0xd1, 0x3b, 0x9c, 0x67, 0xc3, 0x77, 0xc9, 0x3a, 0x9e, 0x33, 0xc3, 0x92, 0xc1, 0xa7, 0x9f, 0x82, 0xc3, 0xf3, 0xba, 0x41, 0xa0, 0xb9, 0xc4, 0x66, 0xb3, 0x05, 0xa1, 0x5a, 0xc4, 0xe7, 0xab, 0x77, 0xa1, 0x89, 0xc5, 0x72, 0xa3, 0xac, 0xa1, 0x82, 0xc6, 0x17, 0x9b, 0xc2, 0xa1, 0x6b, 0xc6, 0xf0, 0x93, 0xc8, 0xa0, 0xee, 0xc7, 0xc7, 0x8b, 0xfd, 0xa0, 0x5a, 0xc8, 0xb6, 0x84, 0x65, 0x9f, 0xc9, 0xc9, 0xdf, 0x7d, 0x53, 0x9f, 0x1b, 0xcb, 0x28, 0x76, 0xb9, 0x9e, 0x7d, 0xcc, 0x74, 0x70, 0x30, 0x9e, 0x19, 0xcd, 0xc2, 0x6a, 0x03, 0x9d, 0xa3, 0xcf, 0x1a, 0x63, 0xe2, 0x9d, 0x86, 0xd0, 0x86, 0x5d, 0xac, 0xa1, 0xa9, 0xbf, 0x34, 0xd4, 0x5f, 0xa3, 0xe8, 0xbe, 0xe2, 0xcc, 0x8a, 0xa5, 0xcf, 0xbe, 0xce, 0xc5, 0x2e, 0xa7, 0x72, 0xbe, 0xde, 0xbd, 0xd8, 0xa8, 0x89, 0xbf, 0x36, 0xb6, 0x5b, 0xa9, 0x6f, 0xbf, 0x9c, 0xae, 0xd5, 0xa9, 0xa7, 0xc0, 0x3c, 0xa6, 0xe8, 0xa9, 0xb1, 0xc0, 0xcc, 0x9e, 0xfd, 0xa9, 0x5f, 0xc1, 0x7f, 0x96, 0xe0, 0xa9, 0x09, 0xc2, 0x46, 0x8e, 0xe5, 0xa8, 0x72, 0xc3, 0x31, 0x87, 0x45, 0xa7, 0xf1, 0xc4, 0x3c, 0x7f, 0xcd, 0xa7, 0x42, 0xc5, 0x9e, 0x79, 0x1c, 0xa6, 0x94, 0xc6, 0xfc, 0x72, 0x7b, 0xa6, 0x3b, 0xc8, 0x48, 0x6c, 0x1f, 0xa5, 0xfe, 0xc9, 0x8b, 0x65, 0xed, 0xa6, 0x1e, 0xcb, 0x28, 0x60, 0x00, 0xaa, 0x60, 0xba, 0x02, 0xd8, 0x8e, 0xac, 0x99, 0xb9, 0x7f, 0xd0, 0xa2, 0xae, 0x79, 0xb9, 0x6d, 0xc9, 0x3e, 0xb0, 0x0d, 0xb9, 0x4c, 0xc1, 0xec, 0xb1, 0x18, 0xb9, 0x79, 0xba, 0x58, 0xb1, 0xd2, 0xb9, 0xaf, 0xb2, 0xa9, 0xb2, 0x33, 0xba, 0x19, 0xaa, 0xb4, 0xb2, 0x4a, 0xba, 0x8f, 0xa2, 0x96, 0xb2, 0x02, 0xbb, 0x44, 0x9a, 0x71, 0xb1, 0x8f, 0xbc, 0x10, 0x92, 0x49, 0xb0, 0xfd, 0xbd, 0x13, 0x8a, 0xa1, 0xb0, 0x64, 0xbe, 0x2c, 0x83, 0x1f, 0xaf, 0xba, 0xbf, 0xa9, 0x7c, 0x05, 0xae, 0xfe, 0xc1, 0x2b, 0x75, 0x1a, 0xae, 0x73, 0xc2, 0x8b, 0x6e, 0x65, 0xae, 0x4b, 0xc3, 0xa2, 0x67, 0xf3, 0xae, 0x4e, 0xc4, 0xf1, 0x61, 0xa5, 0xb3, 0xd2, 0xb4, 0x4f, 
0xdc, 0xdb, 0xb5, 0xc1, 0xb4, 0x0d, 0xd5, 0x02, 0xb7, 0xc0, 0xb4, 0x11, 0xcd, 0x7b, 0xb8, 0xd8, 0xb3, 0xe3, 0xc5, 0xf7, 0xb9, 0xd6, 0xb3, 0xd8, 0xbe, 0x69, 0xba, 0x60, 0xb4, 0x08, 0xb6, 0x89, 0xba, 0xcf, 0xb4, 0x47, 0xae, 0x9c, 0xba, 0xc9, 0xb4, 0xb7, 0xa6, 0x34, 0xba, 0xae, 0xb5, 0x41, 0x9d, 0xdc, 0xba, 0x2b, 0xb5, 0xe2, 0x95, 0x7a, 0xb9, 0xa3, 0xb6, 0xb3, 0x8d, 0x6f, 0xb9, 0x06, 0xb7, 0xbf, 0x85, 0xeb, 0xb8, 0x72, 0xb8, 0xf3, 0x7e, 0xa1, 0xb7, 0xdd, 0xba, 0x7d, 0x77, 0xe7, 0xb7, 0x3a, 0xbc, 0x0a, 0x71, 0x1f, 0xb6, 0xfb, 0xbd, 0x55, 0x6a, 0x4f, 0xb6, 0xd0, 0xbe, 0xab, 0x63, 0x83, 0xbd, 0xb6, 0xae, 0x1c, 0xe1, 0xc1, 0xbf, 0x67, 0xad, 0xcd, 0xd9, 0xbe, 0xc1, 0x06, 0xad, 0xa9, 0xd2, 0x17, 0xc2, 0x03, 0xad, 0x97, 0xca, 0x63, 0xc2, 0xd4, 0xad, 0x93, 0xc2, 0xb0, 0xc3, 0x53, 0xad, 0xb8, 0xba, 0xbf, 0xc3, 0x9c, 0xad, 0xed, 0xb2, 0xaa, 0xc3, 0x89, 0xae, 0x4c, 0xaa, 0x3b, 0xc3, 0x55, 0xae, 0xc7, 0xa1, 0xa1, 0xc2, 0xe5, 0xaf, 0x66, 0x99, 0x2e, 0xc2, 0x6c, 0xb0, 0x0a, 0x90, 0xbb, 0xc1, 0xec, 0xb1, 0x1c, 0x89, 0x1a, 0xc1, 0x5c, 0xb2, 0x2e, 0x81, 0x86, 0xc0, 0xe6, 0xb3, 0x98, 0x7a, 0xb6, 0xc0, 0x61, 0xb5, 0x07, 0x73, 0xff, 0xc0, 0x07, 0xb6, 0x64, 0x6d, 0x29, 0xbf, 0xf1, 0xb7, 0xbe, 0x66, 0x12, 0xc7, 0x92, 0xa7, 0xb4, 0xe6, 0xf8, 0xc9, 0x13, 0xa7, 0x41, 0xde, 0xae, 0xca, 0x82, 0xa7, 0x54, 0xd7, 0x10, 0xcb, 0x57, 0xa7, 0x1b, 0xcf, 0x54, 0xcc, 0x17, 0xa7, 0x13, 0xc7, 0x6f, 0xcc, 0xab, 0xa6, 0xf6, 0xbf, 0x7d, 0xcc, 0xc8, 0xa7, 0x1b, 0xb7, 0x26, 0xcc, 0xb9, 0xa7, 0x2f, 0xae, 0xbb, 0xcc, 0x61, 0xa7, 0x90, 0xa6, 0x26, 0xcc, 0x0d, 0xa8, 0x1c, 0x9d, 0xae, 0xcb, 0x97, 0xa8, 0xdd, 0x95, 0x66, 0xcb, 0x18, 0xa9, 0xcd, 0x8d, 0x57, 0xca, 0x8a, 0xab, 0x0a, 0x85, 0xa6, 0xca, 0x09, 0xac, 0x56, 0x7e, 0x25, 0xc9, 0xa1, 0xad, 0xd7, 0x77, 0x23, 0xc9, 0x28, 0xaf, 0x58, 0x70, 0x2e, 0xc9, 0x53, 0xb0, 0xa2, 0x68, 0xa1, 0xd0, 0x69, 0xa0, 0xbd, 0xeb, 0x79, 0xd1, 0x9d, 0xa0, 0x5f, 0xe3, 0x59, 0xd2, 0xa3, 0xa0, 0x41, 0xdb, 0x7d, 0xd3, 0x7d, 0xa0, 0x30, 0xd3, 0xb3, 0xd4, 0x3b, 0xa0, 0x1d, 0xcb, 0xc3, 0xd4, 
0xc2, 0x9f, 0xfc, 0xc3, 0xa1, 0xd5, 0x17, 0xa0, 0x03, 0xbb, 0x5d, 0xd5, 0x36, 0xa0, 0x1f, 0xb2, 0xf5, 0xd5, 0x17, 0xa0, 0x6e, 0xaa, 0x8e, 0xd4, 0xd3, 0xa0, 0xd9, 0xa2, 0x21, 0xd4, 0x93, 0xa1, 0x88, 0x99, 0xde, 0xd4, 0x51, 0xa2, 0x4e, 0x91, 0xac, 0xd3, 0xfc, 0xa3, 0x95, 0x89, 0xeb, 0xd3, 0x77, 0xa4, 0xda, 0x82, 0x31, 0xd3, 0x38, 0xa6, 0x56, 0x7a, 0xee, 0xd2, 0xf4, 0xa7, 0xd5, 0x73, 0xdb, 0xd2, 0xed, 0xa9, 0x2c, 0x6c, 0x5c, 0xd7, 0xfc, 0x9a, 0x0a, 0xef, 0x25, 0xd9, 0x02, 0x99, 0xc3, 0xe7, 0x0c, 0xda, 0x04, 0x99, 0x95, 0xdf, 0x25, 0xdb, 0x16, 0x99, 0x64, 0xd7, 0x47, 0xdc, 0x1d, 0x99, 0x34, 0xcf, 0x6c, 0xdc, 0xae, 0x99, 0x21, 0xc7, 0x47, 0xdd, 0x34, 0x99, 0x17, 0xbf, 0x22, 0xdd, 0x64, 0x99, 0x4b, 0xb6, 0xe0, 0xdd, 0x8b, 0x99, 0x89, 0xae, 0xa5, 0xdd, 0x60, 0x99, 0xe5, 0xa6, 0x82, 0xdd, 0x35, 0x9a, 0x5a, 0x9e, 0x5f, 0xdd, 0x13, 0x9b, 0x13, 0x96, 0x4d, 0xdc, 0xef, 0x9b, 0xf0, 0x8e, 0x4d, 0xdc, 0xc2, 0x9d, 0x47, 0x86, 0x90, 0xdc, 0x96, 0x9e, 0xa6, 0x7e, 0xe1, 0xdc, 0x9e, 0xa0, 0x34, 0x77, 0x7d, 0xdc, 0xbe, 0xa2, 0x0b, 0x70, 0x14, 0xdf, 0x1b, 0x93, 0x6e, 0xf2, 0xd0, 0xe0, 0x60, 0x93, 0x2d, 0xea, 0x9e, 0xe1, 0x59, 0x93, 0x04, 0xe2, 0xb5, 0xe2, 0x55, 0x92, 0xcf, 0xda, 0xc7, 0xe3, 0x4a, 0x92, 0x91, 0xd2, 0xdd, 0xe4, 0x02, 0x92, 0x6f, 0xca, 0xd5, 0xe4, 0xa1, 0x92, 0x5b, 0xc2, 0xc0, 0xe5, 0x0e, 0x92, 0x7f, 0xba, 0xa8, 0xe5, 0x63, 0x92, 0xbf, 0xb2, 0x8c, 0xe5, 0x8f, 0x93, 0x03, 0xaa, 0x8e, 0xe5, 0xb6, 0x93, 0x56, 0xa2, 0xa1, 0xe5, 0xd4, 0x93, 0xf3, 0x9a, 0xb8, 0xe5, 0xf7, 0x94, 0xb4, 0x92, 0xd2, 0xe6, 0x2b, 0x95, 0xdd, 0x8a, 0xf9, 0xe6, 0x44, 0x97, 0x27, 0x83, 0x27, 0xe6, 0xdf, 0x98, 0xe5, 0x7b, 0x86, 0xe7, 0xaf, 0x9a, 0xfd, 0x73, 0x3d, 0xe5, 0x13, 0x8d, 0x6d, 0xf6, 0xbe, 0xe6, 0x5d, 0x8d, 0x27, 0xee, 0x63, 0xe7, 0x74, 0x8c, 0xe8, 0xe6, 0x67, 0xe8, 0x7e, 0x8c, 0xa9, 0xde, 0x69, 0xe9, 0x89, 0x8c, 0x5f, 0xd6, 0x56, 0xea, 0x84, 0x8c, 0x1d, 0xce, 0x4a, 0xeb, 0x50, 0x8b, 0xfe, 0xc6, 0x54, 0xec, 0x09, 0x8b, 0xec, 0xbe, 0x66, 0xec, 0x83, 0x8c, 0x10, 0xb6, 0x7f, 0xec, 0xf3, 0x8c, 0x3a, 
0xae, 0x9c, 0xed, 0x4d, 0x8c, 0x86, 0xa6, 0xc6, 0xed, 0xaa, 0x8c, 0xe1, 0x9e, 0xef, 0xee, 0x04, 0x8d, 0xa8, 0x97, 0x1e, 0xee, 0x63, 0x8e, 0x7a, 0x8f, 0x46, 0xee, 0xd8, 0x8f, 0xb1, 0x87, 0x5e, 0xef, 0x6d, 0x90, 0xf8, 0x7f, 0x72, 0xf2, 0x5a, 0x93, 0x2d, 0x76, 0xfc, 0xeb, 0x7b, 0x87, 0x50, 0xfa, 0x32, 0xec, 0xa7, 0x87, 0x21, 0xf2, 0x05, 0xed, 0xc6, 0x86, 0xe6, 0xea, 0x07, 0xee, 0xc9, 0x86, 0xa6, 0xe2, 0x13, 0xef, 0xe7, 0x86, 0x4f, 0xd9, 0xeb, 0xf1, 0x24, 0x85, 0xf2, 0xd1, 0xae, 0xf2, 0x29, 0x85, 0xc0, 0xc9, 0xd8, 0xf3, 0x28, 0x85, 0xaf, 0xc2, 0x2b, 0xf3, 0xfd, 0x85, 0xbb, 0xba, 0x7f, 0xf4, 0xb8, 0x85, 0xc6, 0xb2, 0xd1, 0xf5, 0x54, 0x86, 0x0e, 0xab, 0x21, 0xf5, 0xe0, 0x86, 0x6e, 0xa3, 0x6c, 0xf6, 0x7b, 0x87, 0x06, 0x9b, 0xb8, 0xf7, 0x14, 0x87, 0xbd, 0x94, 0x03, 0xf8, 0x23, 0x88, 0xb6, 0x8c, 0x4a, 0xf9, 0xf3, 0x8a, 0x1c, 0x84, 0x83, 0xfe, 0x8c, 0x8b, 0x51, 0x7a, 0xfc, 0xf2, 0x9e, 0x80, 0xad, 0xfd, 0x9a, 0xf3, 0x8b, 0x80, 0xcb, 0xf5, 0x8c, 0xf4, 0x8e, 0x80, 0xbd, 0xed, 0xa7, 0xf5, 0x8d, 0x80, 0x79, 0xe5, 0xa6, 0xf6, 0x8c, 0x80, 0x27, 0xdd, 0x99, 0xf7, 0xd1, 0x7f, 0xa2, 0xd5, 0x52, 0xf9, 0x24, 0x7f, 0x33, 0xcd, 0x70, 0xfa, 0x24, 0x7f, 0x13, 0xc5, 0xf8, 0xfb, 0x21, 0x7e, 0xfb, 0xbe, 0x85, 0xfb, 0xf9, 0x7e, 0xf7, 0xb7, 0x03, 0xfc, 0xca, 0x7f, 0x04, 0xaf, 0x7d, 0xfd, 0x74, 0x7f, 0x67, 0xa7, 0xdd, 0xfe, 0x1c, 0x7f, 0xd0, 0xa0, 0x37, 0xfe, 0xbf, 0x80, 0x47, 0x98, 0x8d, 0xff, 0x9f, 0x80, 0x93, 0x90, 0xd3, 0xff, 0xff, 0x80, 0xf1, 0x88, 0xd7, 0xff, 0xff, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6d, 0x41, 0x42, 0x20, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x00, 0x01, 0x3c, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0xff, 
0xff, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0xff, 0xff, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xb0, 0xba, 0xff, 0xfe, 0x4f, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xad, 0x17, 0xff, 0xff, 0x52, 0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x81, 0x00, 0x00, 0x80, 0x81, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x55, 0x55, 0x00, 0x01, 0x9e, 0x6d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x5e, 0x4c, 0x00, 0x00, 0x02, 0x30, 0xff, 0xff, 0xd7, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x55, 0x55, 0x00, 0x01, 0x8f, 0x97, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x08, 0x70, 0x00, 0x00, 0x02, 0x44, 0xff, 0xff, 0xd7, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x55, 0x55, 0x00, 0x01, 0xe4, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xf3, 0x4d, 0x00, 0x00, 0x01, 0xdf, 0xff, 0xff, 0xd7, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x9f, 0x0f, 0x84, 0xb6, 0xc2, 0x62, 0x96, 0xb7, 0x86, 0x18, 0xd9, 0x87, 0x35, 0xc7, 0x0a, 0xcf, 0x9c, 0x6f, 0xa0, 0x38, 0xf5, 0x03, 0x90, 0x94, 0x3e, 0x48, 0x79, 0xba, 0x53, 0xd2, 0x36, 0xf0, 0x7b, 0x1c, 0x6a, 0xf6, 0xd5, 0xff, 0xff, 0xd3, 0x2c, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x02, 0x66, 0x66, 0x00, 0x00, 0xf1, 0x63, 0x00, 0x00, 0x0d, 0x47, 0x00, 0x00, 0x13, 0x90, 0x00, 0x00, 0x0a, 0x0f, 0x00, 0x00, 0x03, 0x33, 0x00, 0x00, 0x03, 0x33, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x02, 0x66, 0x66, 0x00, 0x00, 0xf1, 0x63, 0x00, 0x00, 0x0d, 0x47, 
0x00, 0x00, 0x13, 0x90, 0x00, 0x00, 0x0a, 0x0f, 0x00, 0x00, 0x03, 0x33, 0x00, 0x00, 0x03, 0x33, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x02, 0x66, 0x66, 0x00, 0x00, 0xf1, 0x63, 0x00, 0x00, 0x0d, 0x47, 0x00, 0x00, 0x13, 0x90, 0x00, 0x00, 0x0a, 0x0f, 0x00, 0x00, 0x03, 0x33, 0x00, 0x00, 0x03, 0x33, 0x6d, 0x42, 0x41, 0x20, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xb0, 0x00, 0x00, 0x73, 0xec, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x1b, 0x3f, 0xa2, 0xc4, 0xaa, 0x74, 0x1f, 0x99, 0xa0, 0x5a, 0xa3, 0xd1, 0x20, 0x6b, 0x9f, 0x0e, 0xa1, 0x1a, 0x22, 0x7a, 0x9d, 0xa0, 0x9b, 0xa8, 0x25, 0x65, 0x9a, 0xdb, 0x94, 0xc2, 0x24, 0x38, 0x9c, 0x4c, 0x8f, 0x4e, 0x26, 0xf4, 0x9a, 0xb1, 0x8a, 0x8a, 0x26, 0xbd, 0x99, 0x7f, 0x88, 0x14, 0x28, 0x73, 0x8e, 0x71, 0x74, 0x7a, 0x28, 0xe8, 0x8c, 0xaa, 0x66, 0xa3, 0x28, 0xb5, 0x8c, 0x5d, 0x64, 0x91, 0x28, 0x80, 0x8c, 0x11, 0x62, 0x79, 0x20, 0xee, 0x87, 0xb3, 0x4a, 0xc5, 0x21, 0x0d, 0x88, 
0xc9, 0x27, 0x85, 0x21, 0x95, 0x89, 0x20, 0x24, 0xad, 0x22, 0x1f, 0x89, 0x6e, 0x22, 0x33, 0x20, 0x61, 0x87, 0x83, 0x1c, 0xd3, 0x17, 0xca, 0xa1, 0x46, 0xb7, 0xba, 0x1a, 0x79, 0x9f, 0x8e, 0xa8, 0xfb, 0x20, 0x13, 0x9d, 0x11, 0xa0, 0x5a, 0x21, 0x12, 0x9b, 0x51, 0x9c, 0xeb, 0x23, 0xf6, 0x99, 0x9e, 0x95, 0x6b, 0x25, 0x92, 0x97, 0x1c, 0x8f, 0xa2, 0x24, 0x29, 0x98, 0xc6, 0x89, 0x8a, 0x29, 0xe5, 0x86, 0x64, 0x75, 0x1a, 0x29, 0x99, 0x8a, 0xb9, 0x72, 0xac, 0x27, 0xcf, 0x8b, 0xdc, 0x64, 0x36, 0x28, 0xd3, 0x8a, 0xd8, 0x62, 0xd8, 0x28, 0x31, 0x89, 0x8c, 0x5c, 0xf8, 0x21, 0xc5, 0x7f, 0x65, 0x28, 0xea, 0x22, 0xb0, 0x80, 0xd6, 0x26, 0x1d, 0x22, 0x0f, 0x86, 0xf7, 0x22, 0x4c, 0x22, 0xdf, 0x87, 0x95, 0x1f, 0xe4, 0x23, 0x9d, 0x88, 0x1c, 0x1d, 0xd9, 0x16, 0xb0, 0x9f, 0x0d, 0xb7, 0x7a, 0x15, 0xfd, 0x9c, 0xc5, 0xb3, 0x12, 0x1a, 0x31, 0x9a, 0xec, 0xa5, 0xbb, 0x20, 0xb7, 0x98, 0x65, 0x9b, 0x64, 0x21, 0xfa, 0x95, 0xe7, 0x96, 0xcd, 0x25, 0x15, 0x93, 0xfc, 0x8d, 0xdb, 0x23, 0x4e, 0x83, 0xc5, 0x78, 0x74, 0x27, 0xc7, 0x88, 0x45, 0x78, 0x0b, 0x29, 0x16, 0x84, 0x63, 0x6b, 0xa0, 0x29, 0xe3, 0x84, 0xe1, 0x63, 0x30, 0x29, 0x1d, 0x86, 0x55, 0x5e, 0x11, 0x23, 0xae, 0x7d, 0xd6, 0x49, 0x0c, 0x23, 0x9f, 0x78, 0x66, 0x27, 0xb8, 0x23, 0xca, 0x7e, 0x1e, 0x24, 0x44, 0x24, 0x98, 0x80, 0xdf, 0x21, 0x7d, 0x24, 0x64, 0x85, 0xbd, 0x1d, 0xc8, 0x25, 0x21, 0x86, 0x75, 0x1b, 0xca, 0x0f, 0xe2, 0x9d, 0xc8, 0xc8, 0x38, 0x12, 0x22, 0x98, 0xc5, 0xb5, 0x60, 0x16, 0x01, 0x96, 0x94, 0xac, 0x3a, 0x1a, 0x61, 0x90, 0xa4, 0x9c, 0xa3, 0x1d, 0x7f, 0x8f, 0xca, 0x94, 0x8e, 0x1f, 0x46, 0x81, 0x4c, 0x7d, 0xeb, 0x23, 0xf4, 0x7e, 0x04, 0x75, 0x79, 0x27, 0x59, 0x7c, 0x13, 0x6d, 0x1c, 0x29, 0x2b, 0x79, 0x72, 0x63, 0x0b, 0x29, 0x62, 0x73, 0x7b, 0x51, 0x6f, 0x28, 0xff, 0x76, 0x79, 0x4d, 0x31, 0x25, 0x35, 0x6d, 0xa1, 0x2a, 0x66, 0x26, 0xec, 0x74, 0x5c, 0x26, 0xcc, 0x25, 0xf6, 0x77, 0x40, 0x22, 0xe2, 0x26, 0x41, 0x7c, 0xfa, 0x1f, 0xcf, 0x27, 0x09, 0x80, 0xe9, 0x1c, 0xc1, 0x26, 0xfa, 0x84, 0x87, 0x19, 0x6f, 0x0c, 0x33, 0x94, 0xcb, 0xc8, 0x74, 
0x10, 0x4e, 0x8b, 0x2f, 0xb2, 0xb9, 0x11, 0x81, 0x88, 0x5b, 0xa5, 0x66, 0x15, 0x19, 0x84, 0xbd, 0x98, 0x3a, 0x1b, 0x8f, 0x7b, 0x05, 0x85, 0x38, 0x1d, 0x38, 0x73, 0x90, 0x75, 0x64, 0x22, 0x92, 0x73, 0x11, 0x6e, 0x0c, 0x27, 0x99, 0x6e, 0xd5, 0x63, 0x78, 0x29, 0x4b, 0x68, 0xff, 0x52, 0xc3, 0x29, 0x25, 0x6d, 0x11, 0x4e, 0xcd, 0x26, 0xf0, 0x66, 0xdb, 0x36, 0xb3, 0x25, 0x5c, 0x66, 0x0f, 0x25, 0xab, 0x27, 0x4e, 0x6a, 0x99, 0x22, 0xbe, 0x28, 0xca, 0x72, 0x46, 0x21, 0x75, 0x28, 0xb5, 0x76, 0x30, 0x1e, 0x4d, 0x29, 0x01, 0x7b, 0xe0, 0x1b, 0x97, 0x57, 0xc9, 0x8b, 0x8e, 0x15, 0x45, 0x09, 0x38, 0x89, 0x7a, 0xca, 0x5d, 0x0a, 0x15, 0x88, 0xbd, 0xc0, 0x75, 0x0f, 0xa7, 0x7c, 0x8b, 0xa1, 0xae, 0x14, 0x84, 0x74, 0xf7, 0x8e, 0x4d, 0x19, 0x90, 0x6f, 0x45, 0x81, 0x83, 0x1a, 0xaf, 0x67, 0xeb, 0x71, 0x1d, 0x20, 0xe4, 0x64, 0x05, 0x63, 0xdd, 0x27, 0x13, 0x61, 0xd9, 0x59, 0x7e, 0x28, 0x0f, 0x5e, 0xec, 0x4b, 0xb5, 0x28, 0xa6, 0x58, 0x98, 0x38, 0xf9, 0x28, 0x4e, 0x58, 0x00, 0x28, 0xab, 0x28, 0x65, 0x5d, 0x09, 0x23, 0xc8, 0x27, 0x8d, 0x62, 0xe7, 0x1c, 0xfd, 0x29, 0x4f, 0x68, 0x68, 0x1b, 0xf4, 0x45, 0x94, 0x72, 0x05, 0x19, 0xb9, 0x58, 0xcb, 0x7f, 0xca, 0x18, 0x4e, 0x69, 0x41, 0x8e, 0x5b, 0x19, 0xba, 0x07, 0x04, 0x7d, 0xae, 0xcb, 0xa1, 0x06, 0x55, 0x7d, 0xf8, 0xc2, 0xa2, 0x0d, 0xc4, 0x6e, 0xfb, 0x9e, 0x00, 0x12, 0x22, 0x69, 0xae, 0x8d, 0x76, 0x16, 0xa1, 0x60, 0xd8, 0x7a, 0xdd, 0x16, 0x7b, 0x59, 0xac, 0x6a, 0x90, 0x1f, 0x2f, 0x54, 0xb7, 0x5b, 0x33, 0x25, 0x15, 0x51, 0xac, 0x4f, 0x1a, 0x28, 0xc4, 0x4d, 0xe7, 0x3e, 0x80, 0x2b, 0x08, 0x49, 0xce, 0x2c, 0xe0, 0x2a, 0x85, 0x4e, 0xc6, 0x27, 0xcd, 0x2c, 0x55, 0x53, 0x00, 0x1d, 0x61, 0x39, 0xff, 0x5a, 0xe6, 0x17, 0x62, 0x49, 0xdc, 0x67, 0x5a, 0x17, 0xa1, 0x59, 0x3e, 0x72, 0x34, 0x18, 0xa8, 0x64, 0x29, 0x7c, 0x19, 0x19, 0x03, 0x70, 0x11, 0x87, 0xdf, 0x19, 0x0f, 0x02, 0x2c, 0x6c, 0x56, 0xce, 0x38, 0x01, 0x6a, 0x6a, 0x88, 0xc4, 0x64, 0x09, 0xeb, 0x5d, 0x3a, 0x9e, 0x92, 0x0a, 0xa3, 0x57, 0x7f, 0x88, 0x18, 0x0f, 0x2e, 0x51, 0x79, 0x74, 0x3f, 0x13, 0x34, 0x48, 
0x2e, 0x60, 0x8c, 0x1e, 0x0c, 0x43, 0xe1, 0x53, 0xa3, 0x24, 0x7b, 0x40, 0x79, 0x44, 0x6e, 0x29, 0x88, 0x3d, 0x2c, 0x35, 0x06, 0x2c, 0xde, 0x3e, 0xee, 0x2b, 0x51, 0x34, 0x48, 0x45, 0x33, 0x23, 0x54, 0x3e, 0xca, 0x4d, 0xc2, 0x1c, 0xf9, 0x4b, 0xbd, 0x58, 0x78, 0x19, 0x0a, 0x56, 0x7f, 0x61, 0xdf, 0x16, 0xc7, 0x63, 0x74, 0x6e, 0x43, 0x18, 0xfc, 0x71, 0x61, 0x79, 0x48, 0x19, 0xd6, 0x7e, 0xe0, 0x84, 0xe5, 0x1a, 0x09, 0x0c, 0x5d, 0x5b, 0x46, 0xce, 0x93, 0x03, 0xd4, 0x59, 0xc8, 0xc5, 0x69, 0x04, 0x89, 0x4d, 0x3a, 0xa0, 0xe7, 0x06, 0xc3, 0x48, 0x98, 0x89, 0x80, 0x0c, 0x83, 0x40, 0xb8, 0x6f, 0x73, 0x16, 0x3e, 0x39, 0x2b, 0x5d, 0xfe, 0x1b, 0xee, 0x30, 0xe6, 0x4a, 0x6c, 0x23, 0x93, 0x2e, 0x1c, 0x3a, 0xdc, 0x28, 0xf6, 0x28, 0xf6, 0x28, 0xf6, 0x37, 0x64, 0x35, 0x31, 0x29, 0x14, 0x42, 0x82, 0x3e, 0xb7, 0x23, 0xd5, 0x4e, 0x2a, 0x49, 0x49, 0x1f, 0x55, 0x58, 0x96, 0x54, 0x28, 0x1c, 0x0f, 0x62, 0x73, 0x5d, 0xd3, 0x18, 0x86, 0x6e, 0xa8, 0x69, 0x78, 0x19, 0xd2, 0x7a, 0xcc, 0x75, 0xa7, 0x1b, 0x53, 0x87, 0x9e, 0x80, 0x01, 0x1c, 0x37, 0x2b, 0x67, 0x30, 0x11, 0xd1, 0x03, 0x1a, 0x21, 0x33, 0x49, 0xc8, 0x70, 0x04, 0x3e, 0x3d, 0x75, 0xa6, 0x64, 0x08, 0x4d, 0x36, 0xfa, 0x89, 0xb6, 0x16, 0xd4, 0x30, 0xd9, 0x73, 0x41, 0x20, 0xf9, 0x2c, 0x2b, 0x5e, 0x4c, 0x28, 0xc6, 0x27, 0xa3, 0x4c, 0xa4, 0x31, 0x23, 0x25, 0xe0, 0x3d, 0x9b, 0x38, 0x57, 0x24, 0x9c, 0x2e, 0x71, 0x44, 0x92, 0x2c, 0x59, 0x29, 0xb6, 0x4f, 0x6a, 0x35, 0xd1, 0x24, 0xdc, 0x5a, 0x9f, 0x3f, 0x36, 0x21, 0x6e, 0x64, 0xe1, 0x4b, 0x34, 0x1e, 0x2d, 0x6d, 0x8e, 0x56, 0x63, 0x1a, 0x2b, 0x79, 0x63, 0x62, 0x66, 0x1b, 0x4f, 0x8a, 0xa4, 0x72, 0x7e, 0x1e, 0x21, 0x91, 0xfd, 0x78, 0xb0, 0x1e, 0x9b, 0x2b, 0x7e, 0x30, 0x00, 0xd1, 0x09, 0x2b, 0x43, 0x2f, 0xf1, 0xd0, 0xe0, 0x17, 0x13, 0x29, 0xef, 0xb2, 0x35, 0x18, 0x75, 0x27, 0x84, 0x8f, 0xf3, 0x23, 0xee, 0x25, 0xfb, 0x77, 0x2e, 0x2d, 0x1c, 0x23, 0x06, 0x61, 0xd7, 0x36, 0xbf, 0x1d, 0xf9, 0x4f, 0x5c, 0x3e, 0x34, 0x1d, 0x46, 0x3f, 0xb9, 0x45, 0xf9, 0x1c, 0x00, 0x31, 0x52, 0x51, 0xb5, 0x26, 0x5e, 0x28, 0xaf, 
0x5b, 0xa1, 0x2a, 0x1a, 0x25, 0x98, 0x64, 0xa2, 0x34, 0xde, 0x21, 0x94, 0x6f, 0xa8, 0x40, 0x24, 0x1e, 0xb0, 0x76, 0xea, 0x4a, 0x6a, 0x1b, 0x17, 0x85, 0xf6, 0x57, 0xfa, 0x1c, 0x77, 0x92, 0xfe, 0x65, 0x14, 0x1f, 0xd7, 0x96, 0xde, 0x70, 0x60, 0x1f, 0x5d, 0x2b, 0x95, 0x2f, 0xf1, 0xd1, 0x0f, 0x2b, 0x70, 0x2f, 0xd3, 0xd0, 0xec, 0x1f, 0x92, 0x24, 0xed, 0xbe, 0x3c, 0x22, 0xe0, 0x1e, 0xb2, 0x9b, 0x30, 0x34, 0x86, 0x1e, 0xa5, 0x7e, 0x4c, 0x42, 0x0b, 0x1f, 0x72, 0x6c, 0xf6, 0x4c, 0x54, 0x1f, 0x35, 0x5d, 0x09, 0x52, 0x9b, 0x1c, 0xcb, 0x4e, 0x21, 0x57, 0x95, 0x1a, 0x8d, 0x3c, 0x9b, 0x5e, 0x45, 0x1d, 0x19, 0x2d, 0xf9, 0x65, 0x55, 0x23, 0x0e, 0x24, 0xfb, 0x6f, 0xdd, 0x25, 0xad, 0x21, 0xa2, 0x77, 0x8c, 0x2f, 0x75, 0x1d, 0xf6, 0x86, 0x31, 0x43, 0x06, 0x1b, 0x84, 0x90, 0x18, 0x48, 0xab, 0x1d, 0x1d, 0x96, 0xe9, 0x54, 0xf8, 0x1e, 0xa3, 0xa0, 0x4b, 0x61, 0x89, 0x1f, 0xeb, 0x2b, 0xab, 0x2f, 0xe2, 0xd1, 0x16, 0x2b, 0x9d, 0x2f, 0xb4, 0xd0, 0xfa, 0x27, 0x15, 0x20, 0xa8, 0xc6, 0xa2, 0x42, 0xec, 0x1b, 0xe3, 0xa5, 0x15, 0x50, 0x81, 0x1e, 0xd8, 0x8b, 0xfd, 0x5b, 0x78, 0x20, 0x37, 0x7b, 0x94, 0x60, 0x5b, 0x1f, 0xc6, 0x69, 0x04, 0x66, 0x19, 0x1c, 0x22, 0x5a, 0x6b, 0x68, 0xc5, 0x18, 0x9e, 0x48, 0xed, 0x6e, 0xef, 0x1a, 0xe1, 0x39, 0x04, 0x73, 0xad, 0x1e, 0xfa, 0x28, 0xc4, 0x7b, 0xd4, 0x20, 0x7e, 0x22, 0xfe, 0x83, 0xfd, 0x21, 0xf7, 0x20, 0x01, 0x8b, 0x4a, 0x2a, 0xb6, 0x18, 0x91, 0x95, 0x53, 0x3f, 0xb4, 0x1a, 0x67, 0x9b, 0x44, 0x4c, 0x52, 0x1d, 0xf7, 0xa5, 0xe9, 0x55, 0x2a, 0x1e, 0x60, 0x2b, 0xc2, 0x2f, 0xd2, 0xd1, 0x1c, 0x2b, 0xca, 0x2f, 0x95, 0xd1, 0x06, 0x4d, 0x51, 0x14, 0x5d, 0xc6, 0xae, 0x5f, 0x75, 0x1b, 0x52, 0xaa, 0xd5, 0x6e, 0x81, 0x1e, 0x42, 0x9a, 0x52, 0x72, 0x67, 0x1f, 0xd4, 0x86, 0xfc, 0x75, 0x0c, 0x1e, 0x71, 0x78, 0xa7, 0x79, 0x2a, 0x1a, 0xd3, 0x67, 0xa0, 0x7c, 0x34, 0x18, 0xe8, 0x56, 0x5e, 0x80, 0x62, 0x19, 0x3e, 0x46, 0x03, 0x84, 0x6e, 0x1a, 0xe4, 0x37, 0x9f, 0x87, 0xee, 0x1e, 0x80, 0x25, 0xdd, 0x91, 0x74, 0x20, 0x80, 0x22, 0xfb, 0x97, 0xc4, 0x21, 0x10, 0x1f, 0xd4, 0xa1, 0x04, 0x22, 
0xc4, 0x1e, 0x71, 0xa8, 0x99, 0x40, 0xe8, 0x1b, 0x1f, 0xac, 0x4f, 0x4a, 0xce, 0x1c, 0x74, 0x2c, 0x77, 0x2e, 0xe3, 0xd0, 0x78, 0x2c, 0x9a, 0x2e, 0x99, 0xd0, 0x69, 0x77, 0xca, 0x16, 0x54, 0xca, 0x59, 0x83, 0xd5, 0x17, 0x67, 0xc0, 0x4c, 0x85, 0xec, 0x1b, 0x71, 0xa7, 0x04, 0x88, 0xa3, 0x1c, 0xd5, 0x99, 0x0f, 0x8b, 0xe3, 0x1c, 0x1c, 0x88, 0x20, 0x90, 0x70, 0x1a, 0x9c, 0x76, 0xef, 0x92, 0x97, 0x1b, 0xd5, 0x63, 0xd8, 0x95, 0x62, 0x1b, 0x7c, 0x54, 0x65, 0x97, 0x95, 0x1b, 0x75, 0x44, 0xd0, 0x9b, 0xf8, 0x1c, 0xa9, 0x35, 0xfb, 0x9a, 0xc4, 0x1f, 0x40, 0x25, 0x2a, 0xa2, 0x7a, 0x1e, 0xc1, 0x21, 0x98, 0xae, 0x38, 0x1f, 0xdc, 0x1f, 0x5b, 0xc1, 0xb4, 0x24, 0xf1, 0x21, 0xe0, 0xc3, 0xb7, 0x27, 0x6a, 0x23, 0x1d, 0x2d, 0xc2, 0x36, 0xec, 0xca, 0x1f, 0x81, 0x40, 0x1c, 0xbf, 0xd2, 0x0a, 0x90, 0x23, 0x1b, 0x9a, 0xcc, 0xfa, 0x9a, 0xcf, 0x1a, 0x87, 0xc5, 0x30, 0x9a, 0x2c, 0x1c, 0x8c, 0xb4, 0x0b, 0x9f, 0xd9, 0x1d, 0x84, 0xab, 0x6f, 0xa1, 0xec, 0x1d, 0x08, 0x9a, 0x58, 0x9f, 0xc6, 0x1c, 0x03, 0x82, 0x9d, 0x9e, 0x80, 0x1c, 0x79, 0x6b, 0x94, 0xa0, 0xd8, 0x1d, 0x54, 0x5b, 0x92, 0xa2, 0xf6, 0x1c, 0x2f, 0x4a, 0xeb, 0xa2, 0xfd, 0x1b, 0x8e, 0x3b, 0x96, 0xa6, 0xed, 0x1b, 0x2d, 0x25, 0xd0, 0xbc, 0xc3, 0x1b, 0x27, 0x21, 0xc1, 0xc3, 0x25, 0x22, 0x1e, 0x23, 0xf9, 0xc4, 0xd6, 0x25, 0x35, 0x24, 0xb6, 0xc5, 0xe4, 0x27, 0x2a, 0x25, 0x2e, 0x88, 0x14, 0x1f, 0xb6, 0xd4, 0xe7, 0x97, 0x5f, 0x1f, 0xa7, 0xd1, 0xd6, 0x9f, 0x85, 0x1d, 0x54, 0xcd, 0xaf, 0xa2, 0xcd, 0x1b, 0x5a, 0xc8, 0xe9, 0xb2, 0xaf, 0x1c, 0xbe, 0xc6, 0x7a, 0xbd, 0x1e, 0x1d, 0x23, 0xc2, 0x40, 0xb2, 0x50, 0x1e, 0x12, 0xa5, 0xd8, 0xb5, 0x4d, 0x1d, 0x0d, 0x96, 0x09, 0xbe, 0xbc, 0x1c, 0x02, 0x81, 0xf2, 0xaf, 0xe4, 0x1d, 0x1b, 0x64, 0x53, 0xaf, 0x13, 0x1c, 0x3d, 0x53, 0x5d, 0xae, 0x2b, 0x1a, 0xbe, 0x41, 0x99, 0xb3, 0xf2, 0x14, 0xf1, 0x2f, 0x22, 0xc4, 0x8c, 0x1f, 0x68, 0x26, 0x2f, 0xc5, 0xf1, 0x23, 0x0e, 0x26, 0x63, 0xc6, 0xcd, 0x25, 0x63, 0x26, 0x86, 0xc7, 0x62, 0x26, 0xff, 0x26, 0xa0, 0x1b, 0xb9, 0xa4, 0x53, 0xac, 0x88, 0x1f, 0x4f, 0xa1, 0xee, 0xa5, 0x97, 
0x20, 0x28, 0xa0, 0xa5, 0xa2, 0xd2, 0x22, 0x41, 0x9f, 0x2c, 0x9d, 0x3e, 0x25, 0x46, 0x9c, 0x37, 0x96, 0x17, 0x24, 0x24, 0x9d, 0x80, 0x90, 0x7b, 0x26, 0xd7, 0x9b, 0xcd, 0x8b, 0x9b, 0x26, 0x9d, 0x9a, 0x97, 0x89, 0x04, 0x28, 0x2e, 0x8f, 0x59, 0x75, 0x4f, 0x28, 0xdd, 0x8d, 0x45, 0x67, 0x5e, 0x28, 0xa5, 0x8c, 0xf0, 0x65, 0x0d, 0x28, 0x67, 0x8c, 0x9a, 0x62, 0xb2, 0x20, 0xea, 0x88, 0xde, 0x4b, 0x41, 0x20, 0xfa, 0x89, 0xd5, 0x28, 0x02, 0x21, 0x8a, 0x8a, 0x1f, 0x24, 0xef, 0x1f, 0x8d, 0x88, 0xfa, 0x1f, 0x37, 0x1e, 0x1f, 0x8f, 0xd5, 0x1a, 0xc4, 0x18, 0x89, 0xa3, 0x1a, 0xb9, 0xc7, 0x1a, 0x78, 0xa1, 0x57, 0xab, 0x73, 0x1f, 0xc5, 0x9e, 0xb8, 0xa2, 0x3d, 0x20, 0xd2, 0x9c, 0xf7, 0x9e, 0xb0, 0x23, 0xd0, 0x9b, 0x21, 0x96, 0xed, 0x25, 0x74, 0x98, 0x6c, 0x90, 0xeb, 0x24, 0x1a, 0x99, 0xdb, 0x8a, 0x9b, 0x27, 0x15, 0x96, 0xfc, 0x86, 0x2d, 0x29, 0x30, 0x8c, 0x23, 0x73, 0x4e, 0x27, 0xc5, 0x8c, 0x65, 0x64, 0xa0, 0x28, 0xc4, 0x8b, 0x46, 0x63, 0x26, 0x27, 0xef, 0x89, 0xa7, 0x5b, 0xa7, 0x20, 0xea, 0x86, 0x0f, 0x47, 0xf4, 0x21, 0xac, 0x85, 0x99, 0x25, 0xad, 0x22, 0x0d, 0x87, 0xf2, 0x22, 0x6a, 0x22, 0xbc, 0x88, 0x69, 0x1f, 0x9d, 0x23, 0x8b, 0x88, 0xeb, 0x1d, 0x70, 0x17, 0x8e, 0xa1, 0x1c, 0xb9, 0xcd, 0x16, 0xdf, 0x9f, 0x1b, 0xb5, 0xb7, 0x19, 0xbc, 0x9c, 0xb3, 0xa8, 0x54, 0x20, 0x69, 0x9a, 0x0d, 0x9d, 0x55, 0x21, 0xc3, 0x97, 0x78, 0x98, 0x7f, 0x24, 0xfb, 0x95, 0x4b, 0x8f, 0x2e, 0x23, 0x1b, 0x85, 0x10, 0x79, 0xf1, 0x27, 0xb6, 0x88, 0xed, 0x78, 0xe9, 0x28, 0xb5, 0x87, 0xdf, 0x6f, 0x9e, 0x28, 0x65, 0x87, 0x10, 0x63, 0x6c, 0x28, 0xfd, 0x87, 0xf4, 0x5f, 0x80, 0x22, 0x76, 0x7f, 0xd6, 0x48, 0x8d, 0x22, 0xeb, 0x7b, 0xe2, 0x27, 0xd1, 0x23, 0xa2, 0x7e, 0xcf, 0x24, 0x40, 0x23, 0xf2, 0x85, 0x3f, 0x20, 0x02, 0x24, 0x64, 0x86, 0x79, 0x1d, 0x4a, 0x25, 0x33, 0x87, 0x30, 0x1b, 0x2b, 0x10, 0x36, 0xa0, 0x60, 0xca, 0xad, 0x12, 0x39, 0x9b, 0x40, 0xb8, 0x08, 0x15, 0xdb, 0x98, 0xc3, 0xae, 0xf5, 0x1a, 0x00, 0x92, 0x9c, 0x9e, 0xe3, 0x1d, 0x7a, 0x92, 0x05, 0x96, 0xa3, 0x1f, 0x68, 0x86, 0x3f, 0x83, 0x7f, 0x25, 0xdd, 0x7e, 
0xec, 0x78, 0x77, 0x27, 0x55, 0x7d, 0x6f, 0x6e, 0x13, 0x29, 0x25, 0x7b, 0x94, 0x64, 0xf4, 0x29, 0xc1, 0x77, 0xed, 0x56, 0xac, 0x28, 0x89, 0x77, 0xec, 0x4c, 0xb2, 0x24, 0x6f, 0x70, 0xc7, 0x2d, 0x33, 0x26, 0xe3, 0x75, 0xe5, 0x27, 0x00, 0x25, 0xa9, 0x7a, 0x82, 0x22, 0x24, 0x26, 0x66, 0x7d, 0x86, 0x1f, 0x48, 0x26, 0x7e, 0x84, 0x28, 0x1a, 0x93, 0x27, 0x3f, 0x85, 0x27, 0x18, 0x8d, 0x0c, 0xb6, 0x97, 0xe1, 0xcb, 0x17, 0x0f, 0xbc, 0x96, 0x3c, 0xc4, 0x46, 0x11, 0x40, 0x8a, 0x9f, 0xa8, 0xe5, 0x14, 0xdc, 0x87, 0x88, 0x9a, 0xdd, 0x1b, 0x03, 0x7d, 0xfe, 0x88, 0x08, 0x1e, 0x38, 0x78, 0x3e, 0x79, 0x82, 0x22, 0x2b, 0x74, 0x5f, 0x6f, 0x80, 0x27, 0x9c, 0x70, 0x55, 0x64, 0x7c, 0x28, 0xd3, 0x6c, 0x8d, 0x56, 0xb7, 0x29, 0x0e, 0x6d, 0xd6, 0x4e, 0xe7, 0x26, 0xb4, 0x67, 0xef, 0x36, 0x7a, 0x24, 0xe5, 0x67, 0x9d, 0x25, 0x76, 0x27, 0xa1, 0x6c, 0xfe, 0x22, 0xb2, 0x29, 0x18, 0x73, 0xf3, 0x20, 0xe6, 0x28, 0xf0, 0x79, 0x3b, 0x1c, 0xcf, 0x29, 0xc7, 0x7c, 0xbc, 0x1a, 0x8c, 0x58, 0x35, 0x8e, 0x8a, 0x15, 0x3d, 0x0a, 0xc0, 0x8d, 0x3d, 0xcd, 0x5f, 0x0a, 0xee, 0x8c, 0xa4, 0xc4, 0xc9, 0x0b, 0x10, 0x86, 0x6c, 0xaf, 0xe4, 0x12, 0x56, 0x7c, 0x6e, 0x98, 0xa4, 0x19, 0x79, 0x72, 0xce, 0x85, 0x55, 0x1a, 0x6d, 0x6b, 0xe4, 0x75, 0x53, 0x20, 0x2e, 0x67, 0x43, 0x67, 0x6d, 0x26, 0xe5, 0x63, 0x89, 0x5a, 0xa7, 0x27, 0xcf, 0x61, 0x3a, 0x4d, 0xc9, 0x28, 0x89, 0x5c, 0x36, 0x3b, 0xf6, 0x27, 0xe4, 0x59, 0xd2, 0x28, 0xb8, 0x28, 0x35, 0x5e, 0x1c, 0x22, 0x54, 0x28, 0xa7, 0x64, 0xa6, 0x1c, 0x39, 0x32, 0x44, 0x6c, 0xfd, 0x1a, 0x4a, 0x47, 0xbb, 0x75, 0x11, 0x19, 0x35, 0x5a, 0x4d, 0x80, 0xc1, 0x18, 0x3a, 0x6d, 0xcf, 0x93, 0x36, 0x1a, 0x83, 0x09, 0x8c, 0x81, 0xd7, 0xce, 0xa6, 0x07, 0x1c, 0x81, 0x8e, 0xc6, 0x20, 0x0d, 0x17, 0x77, 0x3a, 0xab, 0xb8, 0x0f, 0x78, 0x70, 0x11, 0x95, 0x09, 0x16, 0xa7, 0x65, 0x61, 0x80, 0x51, 0x17, 0x85, 0x5d, 0x42, 0x6e, 0x55, 0x1e, 0x65, 0x57, 0xdf, 0x5e, 0x98, 0x25, 0x1f, 0x54, 0x4e, 0x50, 0xf5, 0x27, 0x75, 0x50, 0x94, 0x40, 0xcd, 0x29, 0xaa, 0x4c, 0xce, 0x2f, 0x42, 0x2a, 0x79, 0x50, 0x2d, 0x25, 0xa2, 
0x32, 0x20, 0x55, 0x59, 0x1b, 0xc2, 0x3f, 0x4d, 0x5e, 0xf8, 0x16, 0x79, 0x4b, 0x91, 0x69, 0xac, 0x17, 0xa5, 0x5b, 0x0b, 0x75, 0x30, 0x18, 0xdd, 0x67, 0x53, 0x80, 0x94, 0x18, 0xcc, 0x73, 0xee, 0x8b, 0xe0, 0x19, 0x98, 0x05, 0x1d, 0x6f, 0xf8, 0xd1, 0xc7, 0x01, 0x36, 0x6e, 0xee, 0xc8, 0x58, 0x07, 0x4c, 0x6b, 0xd1, 0xb2, 0xdc, 0x0b, 0x86, 0x5e, 0xaf, 0x91, 0x6b, 0x0e, 0xa8, 0x57, 0x07, 0x7a, 0x3d, 0x12, 0x76, 0x4a, 0xb4, 0x65, 0x21, 0x1c, 0x5e, 0x47, 0x3c, 0x55, 0xc4, 0x23, 0x69, 0x42, 0xda, 0x46, 0xd0, 0x29, 0x88, 0x40, 0xd3, 0x38, 0x0a, 0x2d, 0xaf, 0x3f, 0x8e, 0x28, 0x17, 0x36, 0xd8, 0x47, 0x77, 0x21, 0xd2, 0x41, 0xb9, 0x50, 0x85, 0x1c, 0x61, 0x4d, 0xac, 0x5a, 0xc9, 0x17, 0xf0, 0x5a, 0x6a, 0x66, 0x7f, 0x18, 0x62, 0x67, 0x36, 0x71, 0xe3, 0x19, 0x33, 0x74, 0x82, 0x7c, 0xd4, 0x19, 0xe7, 0x7f, 0x75, 0x86, 0x19, 0x19, 0xeb, 0x0e, 0x95, 0x60, 0x24, 0xd2, 0x11, 0x03, 0x6c, 0x5e, 0x9a, 0xca, 0x2f, 0x04, 0x57, 0x51, 0x3d, 0xa6, 0xa9, 0x06, 0x64, 0x4c, 0xa1, 0x8e, 0xc2, 0x09, 0x79, 0x45, 0x3c, 0x73, 0xf3, 0x1c, 0x68, 0x3f, 0xd8, 0x64, 0xae, 0x26, 0x0c, 0x3b, 0xa6, 0x54, 0xfd, 0x2e, 0x1b, 0x38, 0xf2, 0x45, 0xbc, 0x34, 0x77, 0x35, 0x6a, 0x35, 0x89, 0x3a, 0x77, 0x37, 0x42, 0x27, 0x86, 0x45, 0x1c, 0x41, 0x36, 0x22, 0xd5, 0x50, 0x35, 0x4b, 0xf3, 0x1e, 0x9b, 0x5a, 0xdf, 0x56, 0xd4, 0x1a, 0xf5, 0x65, 0xa3, 0x61, 0x20, 0x18, 0x8f, 0x72, 0x97, 0x6d, 0xb8, 0x1a, 0x3b, 0x7f, 0xc6, 0x7a, 0xb6, 0x1b, 0xa2, 0x89, 0xd3, 0x81, 0xd7, 0x1b, 0xae, 0x2b, 0x74, 0x30, 0x1b, 0xd1, 0x0e, 0x0d, 0xbf, 0x4a, 0x53, 0xcb, 0x10, 0x04, 0x85, 0x43, 0x6a, 0xaf, 0xd5, 0x02, 0x0a, 0x37, 0x6c, 0x8d, 0xcc, 0x15, 0x9c, 0x36, 0xf9, 0x75, 0xb8, 0x26, 0x71, 0x33, 0xb3, 0x65, 0xe6, 0x32, 0x85, 0x31, 0x14, 0x55, 0x98, 0x3a, 0xfe, 0x2f, 0x7f, 0x46, 0xcc, 0x41, 0xb7, 0x2e, 0x7e, 0x37, 0xd6, 0x47, 0x88, 0x2e, 0x95, 0x28, 0x36, 0x52, 0xa8, 0x39, 0x21, 0x23, 0xce, 0x5c, 0xf0, 0x43, 0x47, 0x20, 0x47, 0x66, 0xa8, 0x4e, 0x3f, 0x1d, 0x0a, 0x6f, 0xb2, 0x5a, 0x12, 0x1a, 0x48, 0x7c, 0xff, 0x65, 0x90, 0x1b, 0x97, 0x8b, 0xa3, 0x73, 
0x7d, 0x1e, 0x2f, 0x94, 0xa1, 0x7b, 0xac, 0x1e, 0xa3, 0x2b, 0x8b, 0x30, 0x0b, 0xd1, 0x14, 0x2b, 0x5c, 0x30, 0x06, 0xd0, 0xf7, 0x10, 0xbb, 0x30, 0x13, 0xba, 0x2a, 0x12, 0x22, 0x29, 0xf8, 0x94, 0x08, 0x25, 0x7c, 0x29, 0x73, 0x7b, 0x1c, 0x35, 0x04, 0x28, 0xa7, 0x69, 0x12, 0x40, 0xe1, 0x27, 0xb2, 0x59, 0x65, 0x48, 0x50, 0x26, 0x2e, 0x4a, 0x7d, 0x4e, 0xfb, 0x25, 0x21, 0x3a, 0x31, 0x53, 0xb1, 0x24, 0x40, 0x29, 0x0a, 0x5d, 0x9d, 0x2c, 0x2d, 0x24, 0xbf, 0x67, 0x49, 0x36, 0xe5, 0x20, 0xa0, 0x70, 0xbc, 0x42, 0x59, 0x1e, 0x1b, 0x7a, 0x01, 0x4e, 0x4c, 0x1a, 0xf5, 0x89, 0x03, 0x5b, 0xbc, 0x1d, 0x33, 0x95, 0x76, 0x68, 0x7f, 0x1f, 0xb6, 0x9b, 0x9f, 0x74, 0x30, 0x1f, 0x2a, 0x2b, 0xa0, 0x2f, 0xfb, 0xd1, 0x1b, 0x2b, 0x89, 0x2f, 0xe7, 0xd1, 0x03, 0x1f, 0x90, 0x25, 0x61, 0xc1, 0xfe, 0x25, 0x83, 0x1c, 0xe8, 0x9a, 0x4b, 0x37, 0x8e, 0x1d, 0xa6, 0x81, 0x39, 0x46, 0xd3, 0x1e, 0xcb, 0x6b, 0x65, 0x4f, 0x5c, 0x1e, 0x9b, 0x5e, 0x26, 0x56, 0xe8, 0x1c, 0xe9, 0x4f, 0x35, 0x5a, 0xd6, 0x1a, 0x6c, 0x3d, 0xe9, 0x61, 0x38, 0x1c, 0x77, 0x2f, 0x95, 0x68, 0x16, 0x22, 0x39, 0x24, 0x7d, 0x71, 0x36, 0x27, 0x25, 0x20, 0xe8, 0x7a, 0x3d, 0x31, 0xdc, 0x1c, 0x97, 0x88, 0x7d, 0x44, 0x7c, 0x1b, 0xdf, 0x94, 0x9f, 0x50, 0xb0, 0x1e, 0x75, 0x97, 0xe6, 0x57, 0xe0, 0x1e, 0xaa, 0xa1, 0x6e, 0x64, 0xd8, 0x1f, 0xc4, 0x2b, 0xb7, 0x2f, 0xeb, 0xd1, 0x21, 0x2b, 0xb5, 0x2f, 0xc7, 0xd1, 0x10, 0x2c, 0x68, 0x1d, 0x58, 0xc7, 0x3d, 0x45, 0x42, 0x1b, 0x7c, 0xa8, 0xce, 0x53, 0xe4, 0x1e, 0x82, 0x90, 0x96, 0x60, 0x39, 0x21, 0xf4, 0x7a, 0x10, 0x66, 0x28, 0x1f, 0xdc, 0x6b, 0xde, 0x69, 0x9a, 0x1b, 0x43, 0x5c, 0x78, 0x6c, 0x43, 0x18, 0x54, 0x4a, 0x75, 0x72, 0x12, 0x1a, 0xa4, 0x3b, 0x3f, 0x76, 0x87, 0x1c, 0xf7, 0x2b, 0x5e, 0x7c, 0xcd, 0x20, 0x43, 0x23, 0x1d, 0x86, 0x39, 0x22, 0x43, 0x1f, 0x61, 0x8f, 0x50, 0x2e, 0x7a, 0x18, 0xe8, 0x96, 0xcf, 0x42, 0xc5, 0x1a, 0xa0, 0x9f, 0xe0, 0x50, 0xbe, 0x1d, 0xf2, 0xaa, 0x86, 0x55, 0x77, 0x1d, 0x8b, 0x2b, 0xce, 0x2f, 0xdc, 0xd1, 0x27, 0x2c, 0x83, 0x2e, 0xca, 0xd0, 0x74, 0x53, 0xba, 0x13, 0x51, 0xcc, 0xa7, 
0x69, 0x93, 0x15, 0xf3, 0xb7, 0xe4, 0x71, 0xde, 0x1e, 0x1b, 0x9b, 0xf1, 0x77, 0x43, 0x20, 0xc4, 0x88, 0x18, 0x7b, 0xf7, 0x1f, 0x99, 0x78, 0x90, 0x7b, 0xec, 0x19, 0xa4, 0x68, 0x98, 0x80, 0x6c, 0x19, 0x1e, 0x57, 0xa8, 0x82, 0xb9, 0x19, 0xe1, 0x47, 0xfe, 0x88, 0x2b, 0x1a, 0xa7, 0x38, 0xd5, 0x8b, 0xc5, 0x1d, 0xa8, 0x27, 0x90, 0x95, 0x00, 0x20, 0x3a, 0x22, 0xb7, 0x9b, 0x49, 0x21, 0x18, 0x1f, 0x2f, 0xa5, 0x98, 0x25, 0xf5, 0x1e, 0x62, 0xaa, 0xf4, 0x42, 0xbc, 0x1b, 0x26, 0xad, 0x5f, 0x4e, 0x1f, 0x1c, 0x50, 0x2a, 0xf7, 0x33, 0x32, 0xd0, 0x1f, 0x6d, 0xeb, 0x1c, 0xc5, 0xd6, 0x41, 0x7d, 0x7d, 0x19, 0x9a, 0xce, 0x12, 0x89, 0x37, 0x17, 0xe6, 0xc3, 0x9a, 0x88, 0x21, 0x1b, 0x99, 0xaa, 0x4c, 0x8c, 0x55, 0x1d, 0x39, 0x9a, 0xd8, 0x93, 0x82, 0x1d, 0x37, 0x8e, 0x70, 0x92, 0xe1, 0x1a, 0xee, 0x77, 0x7e, 0x96, 0x4b, 0x1c, 0x8c, 0x65, 0xeb, 0x96, 0xba, 0x1b, 0x91, 0x54, 0xd7, 0x9a, 0xcc, 0x1b, 0x95, 0x45, 0x92, 0x9f, 0x65, 0x1b, 0xf0, 0x36, 0x1f, 0x9f, 0xe2, 0x1e, 0x3a, 0x25, 0x13, 0xa8, 0xf0, 0x1e, 0x21, 0x20, 0xf4, 0xbf, 0xcd, 0x22, 0x11, 0x20, 0xd7, 0xc2, 0xf7, 0x26, 0x1c, 0x22, 0xbe, 0xc4, 0xaf, 0x28, 0x52, 0x23, 0xc8, 0x7b, 0x3b, 0x1f, 0xd5, 0xd8, 0x7d, 0x85, 0xa3, 0x1e, 0xc6, 0xd4, 0x11, 0x95, 0x6d, 0x1d, 0xd3, 0xcf, 0x68, 0x9f, 0x3d, 0x1a, 0xdb, 0xc8, 0x3d, 0xaa, 0x73, 0x1c, 0x00, 0xc2, 0x87, 0xa2, 0xaf, 0x1d, 0xdb, 0xac, 0x3e, 0xa6, 0x4a, 0x1d, 0x50, 0x9b, 0x8c, 0xa8, 0xde, 0x1b, 0xf6, 0x88, 0x99, 0xa5, 0xbb, 0x1c, 0x9f, 0x6f, 0x84, 0xa5, 0x82, 0x1d, 0x54, 0x5e, 0x37, 0xa4, 0x9c, 0x1c, 0xb2, 0x4c, 0x71, 0xab, 0x1c, 0x1a, 0x80, 0x3f, 0x5c, 0xae, 0x01, 0x18, 0x7c, 0x27, 0xb3, 0xc1, 0xcd, 0x1e, 0x29, 0x23, 0xdf, 0xc4, 0x68, 0x23, 0x47, 0x24, 0xd8, 0xc5, 0xcf, 0x26, 0x1b, 0x25, 0x61, 0xc6, 0xae, 0x27, 0xe5, 0x25, 0xba, 0x8d, 0x04, 0x21, 0x40, 0xd6, 0x05, 0x99, 0x3d, 0x21, 0x05, 0xd3, 0x91, 0xa3, 0xd0, 0x1e, 0xd0, 0xcf, 0xe0, 0xa7, 0xa5, 0x1a, 0xd3, 0xcc, 0xaa, 0xb4, 0xb5, 0x1c, 0xd7, 0xc8, 0x7a, 0xc1, 0x8a, 0x1d, 0x7a, 0xc3, 0xef, 0xc3, 0x6d, 0x1e, 0x7d, 0xb2, 0xfb, 0xc3, 0x1d, 0x1d, 
0xbc, 0x9d, 0xfc, 0xc1, 0xe2, 0x1c, 0x2f, 0x83, 0x92, 0xc0, 0x03, 0x1c, 0x2b, 0x70, 0x40, 0xb0, 0xc5, 0x1c, 0x57, 0x54, 0x10, 0xaf, 0xf4, 0x1a, 0xc4, 0x42, 0x53, 0xbd, 0x1f, 0x11, 0x99, 0x27, 0x36, 0xc5, 0xd3, 0x20, 0x8f, 0x27, 0x10, 0xc6, 0xeb, 0x23, 0xf4, 0x27, 0x0f, 0xc7, 0x98, 0x26, 0x1e, 0x27, 0x12, 0xc8, 0x0d, 0x27, 0x9d, 0x27, 0x16, 0x1c, 0x37, 0xa5, 0xe4, 0xae, 0x9d, 0x1f, 0x13, 0xa3, 0xb0, 0xa7, 0x8d, 0x1f, 0xdc, 0xa2, 0x6d, 0xa4, 0xbe, 0x21, 0xfe, 0xa0, 0xee, 0x9f, 0x09, 0x25, 0x21, 0x9d, 0xc6, 0x97, 0x9a, 0x24, 0x0d, 0x9e, 0xe5, 0x91, 0xd7, 0x26, 0xb3, 0x9d, 0x1a, 0x8c, 0xdc, 0x26, 0x76, 0x9b, 0xe4, 0x8a, 0x21, 0x28, 0x58, 0x91, 0xb6, 0x77, 0x12, 0x2a, 0xe6, 0x8a, 0xfd, 0x67, 0x60, 0x28, 0x8f, 0x8d, 0xaa, 0x65, 0xab, 0x22, 0xcf, 0x8d, 0x2f, 0x5f, 0xe1, 0x23, 0x2e, 0x9a, 0x63, 0x5c, 0x55, 0x20, 0xe3, 0x8b, 0x09, 0x28, 0x92, 0x1e, 0x9e, 0x8a, 0xa8, 0x22, 0x79, 0x1d, 0x4e, 0x90, 0xcb, 0x1d, 0xcc, 0x1d, 0xc0, 0x91, 0x3f, 0x1a, 0x4a, 0x19, 0x4a, 0xa4, 0xef, 0xbb, 0xd0, 0x1b, 0x1d, 0xa3, 0x59, 0xae, 0x01, 0x1f, 0x6a, 0xa0, 0x9b, 0xa4, 0x63, 0x20, 0x85, 0x9e, 0xda, 0xa0, 0xbb, 0x23, 0x9a, 0x9c, 0xe4, 0x98, 0xb4, 0x25, 0x50, 0x99, 0xf9, 0x92, 0x6d, 0x24, 0x05, 0x9b, 0x27, 0x8b, 0xe2, 0x26, 0xf6, 0x98, 0x23, 0x87, 0x34, 0x28, 0xc2, 0x8d, 0x8b, 0x74, 0x2a, 0x28, 0xfd, 0x8c, 0x4a, 0x66, 0xa8, 0x28, 0xb1, 0x8b, 0xd8, 0x63, 0x90, 0x27, 0xa5, 0x89, 0xfb, 0x5a, 0x79, 0x20, 0xab, 0x88, 0x32, 0x46, 0xdf, 0x21, 0x49, 0x88, 0xa1, 0x26, 0x30, 0x22, 0x09, 0x89, 0x19, 0x22, 0x8d, 0x20, 0xb7, 0x86, 0x9f, 0x1c, 0x74, 0x1f, 0x78, 0x8d, 0xdd, 0x17, 0x37, 0x18, 0x6b, 0xa3, 0x2e, 0xbc, 0x1e, 0x17, 0xd9, 0xa1, 0x7d, 0xb8, 0x67, 0x19, 0x4f, 0x9e, 0xd3, 0xab, 0x51, 0x20, 0x0a, 0x9b, 0xfe, 0x9f, 0x9a, 0x21, 0x80, 0x99, 0x59, 0x9a, 0x83, 0x24, 0xd8, 0x96, 0xe2, 0x90, 0xc9, 0x27, 0x78, 0x8e, 0xa3, 0x84, 0xe5, 0x27, 0xa1, 0x89, 0xd5, 0x7a, 0x18, 0x28, 0x97, 0x89, 0x4e, 0x71, 0x28, 0x28, 0x49, 0x88, 0xbc, 0x65, 0x09, 0x28, 0xdf, 0x89, 0x52, 0x60, 0x9f, 0x21, 0x1d, 0x81, 0xe4, 0x48, 0x18, 
0x22, 0x2c, 0x7e, 0xd3, 0x28, 0x15, 0x23, 0x52, 0x81, 0x74, 0x24, 0x03, 0x23, 0x60, 0x86, 0x7f, 0x1f, 0x7f, 0x24, 0x66, 0x87, 0x5b, 0x1c, 0xb1, 0x25, 0x48, 0x88, 0x10, 0x1a, 0x6e, 0x10, 0x6d, 0xa2, 0x97, 0xcd, 0x67, 0x12, 0x50, 0x9d, 0xf4, 0xba, 0xf9, 0x15, 0xf9, 0x9b, 0x62, 0xb2, 0x28, 0x17, 0x69, 0x97, 0xec, 0xa6, 0x96, 0x1d, 0x22, 0x94, 0xfd, 0x99, 0xd7, 0x23, 0x36, 0x8f, 0x75, 0x8e, 0xae, 0x22, 0xee, 0x85, 0x47, 0x7b, 0x8e, 0x27, 0x6c, 0x81, 0xb2, 0x73, 0x9e, 0x29, 0x08, 0x7e, 0x37, 0x67, 0xe1, 0x29, 0xa9, 0x7a, 0xa0, 0x59, 0x67, 0x28, 0x2a, 0x79, 0x6d, 0x4c, 0x71, 0x24, 0x2f, 0x72, 0xa4, 0x2d, 0xa0, 0x24, 0xcd, 0x75, 0x8a, 0x25, 0xff, 0x25, 0x68, 0x7d, 0x0d, 0x21, 0x66, 0x26, 0x8d, 0x80, 0xf3, 0x1d, 0x9a, 0x26, 0xc3, 0x84, 0xe1, 0x19, 0x9d, 0x27, 0xbd, 0x86, 0x1e, 0x17, 0xf3, 0x0e, 0x2e, 0x9a, 0x10, 0xcd, 0xe2, 0x0f, 0xff, 0x99, 0xe2, 0xc7, 0x72, 0x12, 0x21, 0x94, 0x63, 0xb5, 0x47, 0x14, 0x12, 0x89, 0xe0, 0x9e, 0x33, 0x1c, 0x09, 0x84, 0xb8, 0x92, 0x0e, 0x1e, 0x2c, 0x7d, 0x67, 0x7f, 0x5d, 0x21, 0xdd, 0x77, 0xb5, 0x74, 0x02, 0x27, 0x0b, 0x75, 0x09, 0x69, 0xff, 0x28, 0xb3, 0x71, 0xb7, 0x5d, 0x16, 0x29, 0xc1, 0x6e, 0xdc, 0x4f, 0xdd, 0x26, 0x58, 0x6a, 0x6b, 0x38, 0xa4, 0x25, 0x31, 0x69, 0x87, 0x25, 0xff, 0x28, 0x04, 0x6f, 0x56, 0x22, 0x7b, 0x28, 0xca, 0x73, 0xd8, 0x1e, 0xec, 0x29, 0x62, 0x7b, 0x6e, 0x1b, 0x2e, 0x2d, 0xe8, 0x81, 0x9a, 0x1a, 0x96, 0x5a, 0xf9, 0x90, 0x3c, 0x15, 0x50, 0x0e, 0xc3, 0x90, 0x16, 0xce, 0xf4, 0x0b, 0x9e, 0x90, 0x4d, 0xc8, 0x22, 0x0a, 0xe0, 0x89, 0x6c, 0xb3, 0x65, 0x13, 0xeb, 0x7f, 0x03, 0x9c, 0x11, 0x19, 0x93, 0x76, 0xa1, 0x88, 0xf9, 0x1a, 0x50, 0x70, 0x4f, 0x7a, 0x58, 0x1f, 0x0f, 0x6a, 0x92, 0x6a, 0xe0, 0x26, 0x86, 0x65, 0x90, 0x5d, 0x09, 0x27, 0x96, 0x63, 0x62, 0x4f, 0xa4, 0x27, 0xbc, 0x5e, 0xaf, 0x3d, 0x7f, 0x28, 0x30, 0x5c, 0xb7, 0x2a, 0xfc, 0x27, 0xc9, 0x5f, 0x52, 0x1f, 0xbc, 0x2c, 0x27, 0x66, 0x7c, 0x1a, 0x61, 0x36, 0xc8, 0x6f, 0xae, 0x19, 0x1a, 0x4d, 0xbc, 0x7a, 0x2b, 0x18, 0x9a, 0x5d, 0xcb, 0x80, 0xc9, 0x16, 0xfe, 0x6e, 0xab, 0x94, 
0x34, 0x1a, 0xa8, 0x0c, 0x7a, 0x86, 0x01, 0xd1, 0xa3, 0x08, 0xbd, 0x84, 0x37, 0xc9, 0xc9, 0x09, 0x5b, 0x7f, 0x6d, 0xb7, 0x89, 0x0e, 0xa2, 0x74, 0xb5, 0x9a, 0xa6, 0x14, 0x7b, 0x6a, 0x6d, 0x83, 0xf9, 0x17, 0xd8, 0x61, 0x1b, 0x72, 0x49, 0x1e, 0x55, 0x5b, 0x92, 0x62, 0xa8, 0x25, 0x2a, 0x58, 0x4e, 0x53, 0xec, 0x28, 0x25, 0x53, 0xa0, 0x44, 0x10, 0x29, 0x8f, 0x50, 0x8a, 0x32, 0x3c, 0x2b, 0x8d, 0x4f, 0xdb, 0x1f, 0xeb, 0x35, 0x11, 0x57, 0x7c, 0x1a, 0x0c, 0x41, 0xed, 0x61, 0xb9, 0x16, 0x8c, 0x4d, 0x0d, 0x6b, 0xdd, 0x17, 0xab, 0x5e, 0x1d, 0x78, 0x53, 0x18, 0xfc, 0x69, 0x69, 0x82, 0x56, 0x18, 0xdf, 0x79, 0x54, 0x91, 0xb5, 0x1a, 0x69, 0x08, 0x84, 0x73, 0x1a, 0xd5, 0x42, 0x01, 0x90, 0x73, 0xcf, 0xcd, 0x24, 0x07, 0x35, 0x6f, 0xed, 0xb7, 0x5d, 0x0a, 0x6a, 0x65, 0xce, 0x9b, 0x73, 0x0e, 0x4d, 0x5c, 0x0a, 0x80, 0x4b, 0x1b, 0xe3, 0x54, 0x29, 0x6e, 0x67, 0x26, 0x4f, 0x50, 0x76, 0x5f, 0xbd, 0x2c, 0xfa, 0x4d, 0x0f, 0x50, 0xc6, 0x32, 0x61, 0x4a, 0x0b, 0x41, 0x03, 0x35, 0xd9, 0x48, 0x89, 0x30, 0x32, 0x39, 0x6f, 0x49, 0x69, 0x1f, 0xa7, 0x44, 0xc5, 0x53, 0x74, 0x1b, 0x10, 0x50, 0x56, 0x5d, 0x8a, 0x16, 0xa8, 0x5d, 0xa5, 0x69, 0xf5, 0x18, 0x89, 0x6a, 0x40, 0x75, 0x4b, 0x19, 0x64, 0x76, 0x0f, 0x7f, 0x17, 0x19, 0xeb, 0x83, 0x0e, 0x8b, 0x2b, 0x1a, 0xbc, 0x10, 0xf2, 0x65, 0x16, 0xd5, 0xab, 0x08, 0x30, 0x65, 0x88, 0xce, 0x97, 0x01, 0xcd, 0x60, 0x06, 0xba, 0x83, 0x07, 0x00, 0x54, 0x38, 0x98, 0xc5, 0x17, 0x16, 0x4e, 0x30, 0x7f, 0x0a, 0x24, 0xc2, 0x49, 0xe9, 0x6d, 0xda, 0x30, 0x13, 0x45, 0x79, 0x5e, 0x15, 0x37, 0xb8, 0x42, 0x0b, 0x4e, 0xe7, 0x3e, 0x87, 0x3f, 0x88, 0x3f, 0xa5, 0x43, 0x3b, 0x41, 0x4f, 0x30, 0x22, 0x47, 0x98, 0x43, 0x78, 0x20, 0xd6, 0x52, 0x68, 0x4e, 0x9c, 0x1d, 0x89, 0x5d, 0x1a, 0x59, 0x99, 0x19, 0xc0, 0x68, 0xc2, 0x65, 0x44, 0x19, 0x4f, 0x75, 0xd0, 0x71, 0x1d, 0x1a, 0x72, 0x82, 0x02, 0x7c, 0x3f, 0x1b, 0xd0, 0x8c, 0x76, 0x85, 0x99, 0x1c, 0x18, 0x2b, 0x80, 0x30, 0x25, 0xd1, 0x1a, 0x11, 0x61, 0x51, 0xeb, 0xd0, 0x5d, 0x03, 0x60, 0x4c, 0x23, 0xbd, 0xb9, 0x0e, 0x42, 0x41, 0xfd, 0x97, 0xf2, 
0x23, 0x05, 0x41, 0x2c, 0x80, 0x98, 0x32, 0x28, 0x3e, 0x19, 0x6e, 0x37, 0x3c, 0x88, 0x3a, 0xb1, 0x5e, 0x33, 0x44, 0xed, 0x38, 0xb1, 0x4f, 0x86, 0x4b, 0x1e, 0x37, 0xb6, 0x40, 0x26, 0x50, 0x6e, 0x38, 0x69, 0x30, 0xe4, 0x54, 0xdf, 0x3a, 0xc2, 0x22, 0x6b, 0x5e, 0xf8, 0x46, 0x26, 0x1f, 0x67, 0x68, 0x98, 0x51, 0x95, 0x1b, 0xf8, 0x73, 0x0d, 0x5d, 0x49, 0x1a, 0xa9, 0x80, 0xd8, 0x69, 0xe8, 0x1c, 0x89, 0x8e, 0x33, 0x76, 0x5d, 0x1e, 0x60, 0x96, 0xba, 0x7e, 0x81, 0x1e, 0x9b, 0x31, 0xb1, 0x2e, 0x91, 0xd0, 0x71, 0x2b, 0x75, 0x30, 0x19, 0xd1, 0x0e, 0x09, 0x40, 0x35, 0x83, 0xbe, 0xc1, 0x1d, 0x50, 0x34, 0xeb, 0xa0, 0xff, 0x31, 0xfc, 0x33, 0xc8, 0x82, 0xfb, 0x41, 0xa1, 0x32, 0xb0, 0x70, 0x87, 0x4c, 0xb2, 0x31, 0x36, 0x61, 0x96, 0x52, 0x59, 0x2f, 0xb2, 0x53, 0xd3, 0x57, 0x56, 0x2e, 0x62, 0x43, 0x38, 0x5c, 0x52, 0x2d, 0xb4, 0x32, 0x77, 0x60, 0xa1, 0x2f, 0x10, 0x23, 0x05, 0x69, 0x96, 0x39, 0x56, 0x1f, 0xde, 0x73, 0x9d, 0x45, 0xcb, 0x1c, 0x8e, 0x7e, 0x17, 0x52, 0x90, 0x1b, 0x7a, 0x8d, 0x08, 0x62, 0x4b, 0x1f, 0x51, 0x96, 0xcd, 0x6d, 0xc5, 0x20, 0x4e, 0x9e, 0x94, 0x74, 0xea, 0x1f, 0x1e, 0x2b, 0xae, 0x30, 0x04, 0xd1, 0x25, 0x2b, 0xa2, 0x2f, 0xfb, 0xd1, 0x1b, 0x21, 0x99, 0x25, 0x6d, 0xc4, 0x22, 0x33, 0x3c, 0x26, 0x14, 0xa8, 0xc3, 0x44, 0x65, 0x28, 0x1e, 0x8c, 0xb7, 0x50, 0xf3, 0x29, 0x07, 0x78, 0x2f, 0x59, 0x56, 0x28, 0x38, 0x67, 0xcb, 0x5f, 0x2d, 0x26, 0x01, 0x58, 0x27, 0x63, 0xd9, 0x23, 0xbb, 0x47, 0x92, 0x67, 0x5e, 0x21, 0x62, 0x35, 0x11, 0x6a, 0xa6, 0x20, 0x43, 0x23, 0xf9, 0x73, 0x4d, 0x29, 0x7b, 0x1f, 0x98, 0x7c, 0xec, 0x35, 0x1a, 0x1b, 0x78, 0x89, 0xa1, 0x45, 0xc3, 0x1c, 0x10, 0x95, 0xc2, 0x53, 0xe8, 0x1e, 0x97, 0x9d, 0xd3, 0x5f, 0x2c, 0x20, 0x56, 0xa8, 0xa2, 0x6c, 0x75, 0x1d, 0xa5, 0x2b, 0xc4, 0x2f, 0xf6, 0xd1, 0x2d, 0x2b, 0xce, 0x2f, 0xdc, 0xd1, 0x27, 0x39, 0x29, 0x16, 0x34, 0xcb, 0x7c, 0x49, 0x60, 0x1a, 0x18, 0xaf, 0xea, 0x5a, 0x12, 0x1d, 0xd5, 0x91, 0xfb, 0x62, 0xdf, 0x20, 0xbb, 0x7e, 0xcd, 0x69, 0x6b, 0x1f, 0xa7, 0x6d, 0xfc, 0x6c, 0x06, 0x1c, 0x84, 0x5d, 0x2d, 0x70, 0xb4, 0x1a, 
0x3f, 0x4c, 0xf8, 0x75, 0x2d, 0x1a, 0x6c, 0x3c, 0xa4, 0x7a, 0xb6, 0x1d, 0x54, 0x2e, 0xa8, 0x7f, 0xd7, 0x20, 0x54, 0x23, 0x27, 0x88, 0x12, 0x24, 0x3a, 0x1e, 0x46, 0x92, 0x5e, 0x33, 0x50, 0x19, 0x75, 0x99, 0xf0, 0x44, 0xdd, 0x1a, 0xcf, 0xa6, 0x5b, 0x53, 0xd1, 0x1e, 0x2a, 0xac, 0x02, 0x59, 0x97, 0x1d, 0x9e, 0x2b, 0x83, 0x31, 0xa4, 0xcf, 0xe7, 0x2c, 0x9b, 0x2e, 0xdd, 0xd0, 0x8b, 0x63, 0x98, 0x19, 0x07, 0xd2, 0x77, 0x71, 0x09, 0x14, 0x64, 0xbe, 0x3a, 0x76, 0x7b, 0x1a, 0xea, 0xa1, 0xff, 0x7d, 0x62, 0x1c, 0xd2, 0x8f, 0x7c, 0x7f, 0x60, 0x1b, 0xbb, 0x7c, 0x74, 0x80, 0x70, 0x19, 0x0e, 0x6a, 0x95, 0x83, 0xed, 0x19, 0x45, 0x59, 0xdf, 0x86, 0xc0, 0x1a, 0x3b, 0x49, 0xc4, 0x8b, 0x9a, 0x1a, 0x68, 0x3a, 0x39, 0x8f, 0x2c, 0x1c, 0xe2, 0x29, 0x19, 0x97, 0xf0, 0x20, 0x0d, 0x22, 0x76, 0xa0, 0x71, 0x21, 0xa7, 0x1e, 0xdb, 0xaa, 0xd4, 0x2a, 0x89, 0x1e, 0x0e, 0xad, 0x5c, 0x47, 0x44, 0x1c, 0xb1, 0xae, 0xb8, 0x51, 0x99, 0x1c, 0x68, 0x2c, 0x90, 0x2e, 0xf6, 0xd0, 0x91, 0x75, 0xb2, 0x1f, 0x88, 0xd8, 0xf3, 0x82, 0x1c, 0x1c, 0xbd, 0xd1, 0xcc, 0x8f, 0x17, 0x18, 0x5b, 0xc7, 0xee, 0x92, 0x54, 0x1b, 0x3c, 0xb6, 0x12, 0x99, 0x12, 0x1e, 0x8f, 0xa5, 0xc5, 0x98, 0xec, 0x1e, 0x08, 0x92, 0x38, 0x98, 0xec, 0x1b, 0xf8, 0x7c, 0x19, 0x98, 0x4f, 0x1c, 0xed, 0x67, 0x0c, 0x9a, 0x19, 0x1c, 0x23, 0x56, 0x6c, 0x9d, 0x0e, 0x1b, 0xa8, 0x45, 0x94, 0xa0, 0xe5, 0x1b, 0x8f, 0x36, 0x7a, 0xa3, 0xc8, 0x1d, 0x5c, 0x25, 0x39, 0xae, 0x88, 0x1d, 0x39, 0x20, 0x2a, 0xc1, 0x98, 0x23, 0xb9, 0x22, 0x13, 0xc4, 0x3a, 0x27, 0x46, 0x23, 0x9d, 0xc5, 0xa7, 0x29, 0x38, 0x24, 0x73, 0x80, 0xbf, 0x21, 0xb0, 0xd9, 0xe7, 0x8b, 0x1f, 0x20, 0xd4, 0xd5, 0xd9, 0x98, 0x41, 0x1f, 0xe2, 0xd2, 0x08, 0xa2, 0x91, 0x1b, 0xb7, 0xcb, 0xe1, 0xae, 0xd8, 0x1c, 0x87, 0xc5, 0x44, 0xaf, 0x67, 0x1d, 0xe2, 0xb7, 0x5f, 0xaf, 0x2f, 0x1d, 0xd9, 0xa2, 0xdf, 0xaf, 0x54, 0x1c, 0x63, 0x8d, 0x49, 0xae, 0x64, 0x1c, 0x9f, 0x75, 0xa8, 0xae, 0x47, 0x1c, 0xe8, 0x61, 0x50, 0xad, 0xd0, 0x1b, 0xbf, 0x50, 0x09, 0xad, 0x38, 0x1a, 0x81, 0x40, 0x13, 0xb8, 0x98, 0x13, 0x68, 0x27, 0x6e, 
0xc3, 0x9b, 0x1f, 0xcd, 0x25, 0x1f, 0xc5, 0xac, 0x24, 0x71, 0x25, 0xb8, 0xc6, 0xc7, 0x27, 0x01, 0x26, 0x0e, 0xc7, 0x78, 0x28, 0xa0, 0x26, 0x45, 0x91, 0x11, 0x23, 0x37, 0xd8, 0x2f, 0x9b, 0x1a, 0x22, 0x66, 0xd5, 0x48, 0xa6, 0x62, 0x20, 0x65, 0xd2, 0x22, 0xb5, 0x06, 0x1c, 0xce, 0xcf, 0x3b, 0xb7, 0xfb, 0x1d, 0x33, 0xcb, 0x39, 0xc6, 0x1f, 0x1d, 0xd9, 0xc5, 0xe1, 0xc5, 0x08, 0x1e, 0xa5, 0xb3, 0xdb, 0xc4, 0xd3, 0x1d, 0xd5, 0x9e, 0xd8, 0xc3, 0x8f, 0x1c, 0x46, 0x84, 0x2a, 0xc2, 0x39, 0x1c, 0x2a, 0x71, 0x19, 0xc1, 0xe5, 0x1a, 0x48, 0x5d, 0x63, 0xc2, 0x51, 0x18, 0xf9, 0x4d, 0x29, 0xc3, 0x2b, 0x14, 0xda, 0x30, 0x50, 0xc7, 0x18, 0x21, 0xb8, 0x27, 0xef, 0xc7, 0xe4, 0x24, 0xd8, 0x27, 0xba, 0xc8, 0x62, 0x26, 0xd9, 0x27, 0x9e, 0xc8, 0xb7, 0x28, 0x3b, 0x27, 0x8c, 0x1c, 0xb8, 0xa7, 0x78, 0xb0, 0xb4, 0x1f, 0x73, 0xa5, 0x93, 0xa9, 0x85, 0x1f, 0x85, 0xa4, 0x70, 0xa6, 0xeb, 0x21, 0xb0, 0xa2, 0xee, 0xa1, 0x14, 0x24, 0xf7, 0x9f, 0x91, 0x99, 0x59, 0x23, 0xf0, 0xa0, 0x84, 0x93, 0x6c, 0x26, 0x86, 0x9e, 0xa7, 0x8e, 0x59, 0x26, 0x76, 0x9d, 0xcc, 0x8b, 0xbb, 0x28, 0x7e, 0x94, 0x19, 0x78, 0xd8, 0x28, 0xc8, 0x94, 0xe4, 0x70, 0x8e, 0x28, 0x74, 0x8e, 0xa3, 0x66, 0x7e, 0x26, 0xa8, 0x9b, 0x16, 0x64, 0x05, 0x23, 0xa2, 0x9c, 0x6b, 0x5d, 0x7b, 0x1d, 0x9e, 0x8c, 0x9e, 0x27, 0x07, 0x1c, 0x62, 0x92, 0x06, 0x21, 0xd2, 0x1c, 0xd2, 0x92, 0x7a, 0x1d, 0x78, 0x1d, 0x4b, 0x97, 0xe1, 0x1a, 0x37, 0x1a, 0x0c, 0xa6, 0xc3, 0xbd, 0xd9, 0x1b, 0xc9, 0xa5, 0x5d, 0xb0, 0x8f, 0x1f, 0x03, 0xa2, 0xc4, 0xa6, 0xd8, 0x20, 0x2a, 0xa1, 0x0d, 0xa3, 0x16, 0x23, 0x4c, 0x9e, 0xfe, 0x9a, 0xd8, 0x25, 0x26, 0x9b, 0xd4, 0x94, 0x3d, 0x23, 0xec, 0x9c, 0xc0, 0x8d, 0x71, 0x26, 0xd0, 0x99, 0x96, 0x88, 0x7f, 0x28, 0x67, 0x8e, 0xb6, 0x75, 0x5c, 0x28, 0xf0, 0x8d, 0x2d, 0x67, 0xd4, 0x28, 0x97, 0x8c, 0xa6, 0x64, 0x21, 0x21, 0x22, 0x8c, 0xb7, 0x57, 0xd2, 0x20, 0x67, 0x8b, 0x77, 0x45, 0x00, 0x21, 0x33, 0x8a, 0x12, 0x26, 0xbb, 0x1f, 0xa7, 0x88, 0x56, 0x1f, 0xd5, 0x1e, 0x5a, 0x8f, 0x86, 0x1a, 0x0f, 0x1f, 0xb3, 0x90, 0x9f, 0x17, 0x4a, 0x19, 0x4a, 0xa5, 
0x3f, 0xbe, 0x6f, 0x18, 0xd5, 0xa3, 0xe3, 0xbb, 0x15, 0x1a, 0x40, 0xa1, 0x9c, 0xae, 0xb2, 0x1f, 0x9a, 0x9e, 0x4f, 0xa2, 0x4b, 0x21, 0x2b, 0x9b, 0x9f, 0x9c, 0xf2, 0x24, 0xaa, 0x98, 0xdb, 0x92, 0xc6, 0x25, 0x06, 0x96, 0x1c, 0x8c, 0x65, 0x27, 0x7f, 0x8b, 0x23, 0x7b, 0xcb, 0x2a, 0x59, 0x87, 0xa6, 0x72, 0x49, 0x28, 0x37, 0x89, 0xde, 0x65, 0xc3, 0x28, 0xc8, 0x89, 0xb1, 0x60, 0x9b, 0x20, 0xe5, 0x83, 0xa1, 0x48, 0xa9, 0x21, 0xac, 0x80, 0x0a, 0x28, 0xc8, 0x22, 0x59, 0x86, 0x0d, 0x23, 0x27, 0x23, 0x3c, 0x87, 0x9e, 0x1f, 0x07, 0x24, 0x64, 0x88, 0x71, 0x1b, 0xf4, 0x21, 0xa9, 0x8a, 0xc8, 0x12, 0x3f, 0x11, 0x93, 0xa4, 0xf3, 0xd0, 0x2a, 0x12, 0xb8, 0xa0, 0xef, 0xbe, 0x2c, 0x16, 0x86, 0x9e, 0x74, 0xb5, 0xca, 0x16, 0xdd, 0x9a, 0xb1, 0xaa, 0x39, 0x1c, 0xe0, 0x97, 0x61, 0x9c, 0x83, 0x23, 0x0a, 0x91, 0xb6, 0x91, 0x17, 0x23, 0x47, 0x88, 0x25, 0x7e, 0x7f, 0x26, 0x2a, 0x80, 0x5f, 0x72, 0xe0, 0x28, 0xee, 0x81, 0x2e, 0x6b, 0x1e, 0x2a, 0x1e, 0x7f, 0x14, 0x5e, 0xcf, 0x27, 0xcd, 0x7b, 0x17, 0x4c, 0x7b, 0x21, 0x40, 0x75, 0xe3, 0x34, 0x25, 0x24, 0x3c, 0x78, 0xed, 0x25, 0xec, 0x25, 0x76, 0x7d, 0xeb, 0x20, 0xd9, 0x26, 0x05, 0x84, 0x70, 0x1b, 0x52, 0x27, 0x1a, 0x85, 0xc9, 0x18, 0x6b, 0x44, 0x49, 0x90, 0xe9, 0x11, 0x90, 0x13, 0x56, 0x9c, 0x22, 0xcf, 0xcb, 0x10, 0x41, 0x9d, 0xd0, 0xca, 0xfd, 0x12, 0x57, 0x97, 0xca, 0xb9, 0x2f, 0x12, 0x3d, 0x94, 0x72, 0xad, 0x0f, 0x1b, 0xb7, 0x89, 0x96, 0x97, 0x93, 0x1d, 0xad, 0x82, 0x64, 0x85, 0x4d, 0x21, 0x62, 0x7b, 0x2f, 0x75, 0x3c, 0x26, 0xc1, 0x77, 0x80, 0x6c, 0xd7, 0x28, 0xac, 0x74, 0x35, 0x5f, 0x90, 0x29, 0xae, 0x70, 0x8a, 0x50, 0x87, 0x26, 0x0a, 0x6c, 0xd4, 0x3a, 0xf5, 0x24, 0xc7, 0x6b, 0x5f, 0x26, 0xbf, 0x28, 0x83, 0x71, 0x96, 0x21, 0xf2, 0x29, 0x60, 0x76, 0xc0, 0x1d, 0x04, 0x2d, 0xe2, 0x7c, 0xbe, 0x1c, 0x0d, 0x51, 0xce, 0x8c, 0xad, 0x14, 0xbd, 0x5d, 0xd1, 0x92, 0x16, 0x15, 0x67, 0x11, 0x0b, 0x92, 0xc6, 0xd1, 0xd6, 0x0c, 0xa5, 0x94, 0x42, 0xcb, 0xec, 0x0a, 0xff, 0x8d, 0x75, 0xb8, 0x89, 0x11, 0x40, 0x87, 0x2a, 0xa7, 0x4e, 0x15, 0x7c, 0x7e, 0xc3, 0x93, 0x01, 
0x1c, 0x40, 0x74, 0x81, 0x7d, 0x8a, 0x1e, 0x42, 0x6d, 0xa9, 0x6e, 0x35, 0x26, 0x09, 0x6a, 0x75, 0x62, 0xba, 0x28, 0xe0, 0x66, 0x9a, 0x53, 0x22, 0x27, 0xe8, 0x62, 0x01, 0x40, 0x98, 0x25, 0x74, 0x5e, 0x50, 0x2b, 0x1d, 0x27, 0xa7, 0x5d, 0xf9, 0x17, 0xf9, 0x32, 0xad, 0x68, 0xe2, 0x19, 0xca, 0x43, 0xfb, 0x72, 0xa5, 0x19, 0xcf, 0x53, 0xd1, 0x7f, 0x51, 0x18, 0x52, 0x67, 0x2d, 0x89, 0x5d, 0x17, 0x05, 0x6f, 0xa8, 0x95, 0x56, 0x1a, 0xd1, 0x0f, 0x48, 0x89, 0x8b, 0xd4, 0xa8, 0x0b, 0x54, 0x88, 0xc9, 0xce, 0x2e, 0x08, 0x2c, 0x86, 0xc9, 0xc2, 0xb2, 0x0f, 0x0d, 0x7a, 0x15, 0xa2, 0xab, 0x14, 0x85, 0x70, 0xaf, 0x8a, 0xc6, 0x1c, 0xd2, 0x68, 0x5d, 0x7a, 0x90, 0x26, 0x1a, 0x63, 0xb0, 0x6b, 0x7f, 0x2c, 0xde, 0x5f, 0xfa, 0x5d, 0x3d, 0x30, 0x74, 0x5c, 0x55, 0x4d, 0xc3, 0x31, 0xe5, 0x59, 0x22, 0x3b, 0x2c, 0x33, 0xcc, 0x58, 0x7f, 0x29, 0x23, 0x36, 0x69, 0x58, 0x9b, 0x16, 0xfa, 0x45, 0xf1, 0x64, 0x00, 0x17, 0x3c, 0x54, 0x1c, 0x70, 0x35, 0x18, 0xb0, 0x61, 0x5e, 0x7b, 0x71, 0x18, 0xdf, 0x6d, 0xe2, 0x87, 0xc4, 0x18, 0xe4, 0x7c, 0xb9, 0x95, 0x7e, 0x1a, 0xcf, 0x0b, 0xcd, 0x77, 0xc0, 0xd8, 0xca, 0x06, 0x30, 0x79, 0x43, 0xd2, 0x15, 0x06, 0x14, 0x79, 0x0e, 0xc2, 0xb4, 0x0a, 0x21, 0x6b, 0x8c, 0xa3, 0x48, 0x14, 0x96, 0x64, 0x37, 0x8b, 0x59, 0x24, 0x77, 0x5d, 0xd3, 0x78, 0x90, 0x2e, 0x67, 0x59, 0x19, 0x69, 0x0d, 0x35, 0xbe, 0x55, 0x3a, 0x59, 0x5d, 0x3a, 0x55, 0x52, 0x4b, 0x49, 0xc7, 0x3e, 0x70, 0x51, 0x63, 0x38, 0xec, 0x42, 0x46, 0x53, 0x26, 0x29, 0x3f, 0x46, 0x50, 0x56, 0x04, 0x19, 0x2d, 0x53, 0x22, 0x60, 0x8e, 0x16, 0x3f, 0x60, 0x4d, 0x6c, 0xf4, 0x18, 0xc6, 0x6d, 0xd8, 0x79, 0x24, 0x19, 0xa7, 0x7b, 0xb8, 0x84, 0x6e, 0x19, 0xf7, 0x88, 0x07, 0x91, 0xe2, 0x1c, 0x31, 0x13, 0xe9, 0x68, 0xd5, 0xd8, 0xf8, 0x0c, 0x79, 0x6a, 0xef, 0xd3, 0xce, 0x01, 0x63, 0x66, 0xfd, 0xc4, 0x40, 0x10, 0x2d, 0x5d, 0xaf, 0xa3, 0x4b, 0x21, 0xf0, 0x57, 0x94, 0x89, 0x2e, 0x2f, 0x77, 0x53, 0x44, 0x77, 0x56, 0x39, 0x3d, 0x4e, 0x86, 0x67, 0x25, 0x41, 0x3f, 0x4b, 0x35, 0x58, 0x31, 0x47, 0xd8, 0x48, 0xd3, 0x48, 0xf7, 0x4c, 0x5c, 0x4b, 
0x31, 0x39, 0x4f, 0x50, 0x68, 0x4d, 0x3b, 0x2a, 0x9d, 0x54, 0xe9, 0x51, 0x5a, 0x1c, 0x51, 0x5f, 0x50, 0x5c, 0x99, 0x19, 0x3a, 0x6c, 0x1d, 0x68, 0xb8, 0x19, 0x92, 0x78, 0x42, 0x74, 0xb9, 0x1b, 0x15, 0x85, 0x8b, 0x80, 0x40, 0x1b, 0xf4, 0x8f, 0x4d, 0x89, 0x9d, 0x1c, 0xf3, 0x2b, 0x8d, 0x30, 0x2f, 0xd1, 0x25, 0x15, 0x78, 0x57, 0xdb, 0xd5, 0x37, 0x03, 0x10, 0x52, 0xe2, 0xc7, 0xc7, 0x1a, 0x6e, 0x4e, 0xf2, 0xa7, 0x4e, 0x2e, 0x49, 0x4b, 0x56, 0x8c, 0x81, 0x3d, 0x4c, 0x47, 0x7b, 0x77, 0x8e, 0x47, 0x3e, 0x44, 0x73, 0x67, 0x5a, 0x4f, 0x09, 0x42, 0xa7, 0x58, 0xe4, 0x54, 0xa6, 0x41, 0xa4, 0x49, 0x99, 0x59, 0x71, 0x42, 0x22, 0x39, 0xd7, 0x5d, 0x4a, 0x44, 0x8b, 0x2c, 0x22, 0x61, 0xf7, 0x49, 0xb0, 0x1e, 0xed, 0x6b, 0x11, 0x54, 0xaa, 0x1a, 0xfb, 0x77, 0x8f, 0x61, 0x94, 0x1b, 0x8f, 0x86, 0x2f, 0x6f, 0xce, 0x1d, 0xaf, 0x90, 0xcf, 0x79, 0x50, 0x1e, 0x67, 0x97, 0xe3, 0x80, 0x1d, 0x1d, 0x94, 0x2b, 0xa2, 0x30, 0x1f, 0xd1, 0x2b, 0x2b, 0x8d, 0x30, 0x2e, 0xd1, 0x25, 0x15, 0x1b, 0x3f, 0x32, 0xca, 0xc0, 0x2a, 0x17, 0x3f, 0xb0, 0xad, 0x0c, 0x3e, 0xa8, 0x3e, 0x1d, 0x8b, 0x31, 0x4d, 0x88, 0x3c, 0x60, 0x79, 0x0b, 0x57, 0x2c, 0x3a, 0xb2, 0x6a, 0x2f, 0x5c, 0x17, 0x39, 0x6f, 0x5c, 0x8c, 0x61, 0x28, 0x37, 0x4f, 0x4c, 0x49, 0x65, 0x1c, 0x37, 0x35, 0x3b, 0x96, 0x68, 0xc4, 0x38, 0x94, 0x2c, 0xaa, 0x6c, 0xa0, 0x3c, 0x2e, 0x1f, 0x63, 0x75, 0x37, 0x48, 0xe1, 0x1b, 0xb6, 0x83, 0x1c, 0x57, 0x10, 0x1c, 0x27, 0x90, 0x6d, 0x65, 0x59, 0x1f, 0xbf, 0x97, 0xd7, 0x70, 0x33, 0x1f, 0x5d, 0xa2, 0x37, 0x77, 0x5c, 0x1e, 0xd8, 0x2b, 0xb9, 0x30, 0x0f, 0xd1, 0x32, 0x2b, 0xb9, 0x30, 0x0f, 0xd1, 0x32, 0x2f, 0x4e, 0x2f, 0x4e, 0xc9, 0xae, 0x3f, 0x3a, 0x2f, 0x72, 0xb3, 0x8f, 0x53, 0x0c, 0x32, 0xa0, 0x92, 0x2e, 0x5e, 0x1d, 0x32, 0xfb, 0x7e, 0xb1, 0x64, 0x0e, 0x31, 0x8f, 0x6f, 0xc3, 0x68, 0xcb, 0x2f, 0x9c, 0x60, 0xc5, 0x6c, 0xeb, 0x2d, 0x1f, 0x4f, 0xb5, 0x70, 0x51, 0x2b, 0x74, 0x3e, 0x2b, 0x73, 0x6a, 0x2a, 0x44, 0x2d, 0x07, 0x76, 0xc0, 0x2c, 0xc5, 0x1e, 0x6b, 0x7e, 0xa3, 0x38, 0xee, 0x1b, 0x3a, 0x8c, 0x80, 0x47, 0xb8, 0x1c, 0x9e, 
0x97, 0x2e, 0x55, 0x12, 0x1e, 0x99, 0xa1, 0x2b, 0x62, 0x3a, 0x1f, 0xd5, 0xab, 0xe0, 0x6f, 0xca, 0x1d, 0x4c, 0x2b, 0xd0, 0x30, 0x00, 0xd1, 0x38, 0x2c, 0x85, 0x2f, 0x0f, 0xd0, 0x96, 0x45, 0x7a, 0x1e, 0x75, 0xd9, 0x88, 0x58, 0x83, 0x22, 0x81, 0xbc, 0xaf, 0x65, 0xf9, 0x27, 0x09, 0x9e, 0xdc, 0x6e, 0xab, 0x29, 0x44, 0x88, 0x4d, 0x72, 0xac, 0x27, 0xa9, 0x77, 0xc0, 0x76, 0x9a, 0x25, 0x5c, 0x67, 0x02, 0x79, 0xcf, 0x23, 0x34, 0x54, 0xfe, 0x7b, 0x89, 0x1f, 0x86, 0x42, 0xac, 0x7d, 0x01, 0x1b, 0xc4, 0x2f, 0xde, 0x82, 0xea, 0x20, 0x62, 0x23, 0x40, 0x89, 0x8d, 0x26, 0x71, 0x1b, 0x4b, 0x94, 0x80, 0x36, 0x8d, 0x19, 0xae, 0x9f, 0x34, 0x48, 0xd5, 0x1d, 0x1d, 0xaa, 0xea, 0x54, 0x32, 0x1d, 0x54, 0xad, 0x3e, 0x5d, 0xaa, 0x1d, 0xb6, 0x2c, 0x85, 0x2f, 0x10, 0xd0, 0x96, 0x2c, 0xb2, 0x2e, 0xf2, 0xd0, 0xa4, 0x6c, 0x87, 0x1d, 0xfd, 0xd8, 0x57, 0x73, 0xdb, 0x17, 0x07, 0xc4, 0xf3, 0x7f, 0x58, 0x1d, 0x3f, 0xa4, 0xb8, 0x85, 0x0d, 0x21, 0x0b, 0x8d, 0xd0, 0x82, 0xd6, 0x1d, 0x4c, 0x80, 0x5e, 0x85, 0xa6, 0x1a, 0xc8, 0x6f, 0x86, 0x88, 0x20, 0x19, 0xa3, 0x5c, 0x05, 0x8a, 0xf0, 0x1a, 0x83, 0x4b, 0xcf, 0x8d, 0xf4, 0x1c, 0x3c, 0x3d, 0x4a, 0x93, 0x42, 0x1d, 0xb1, 0x2c, 0x27, 0x99, 0xc9, 0x20, 0x3d, 0x22, 0x9a, 0xa4, 0xf2, 0x23, 0x12, 0x1e, 0xe7, 0xb0, 0x2c, 0x30, 0x84, 0x1c, 0x22, 0xb0, 0x4d, 0x46, 0xc0, 0x1c, 0x88, 0xb0, 0x36, 0x53, 0x1e, 0x1c, 0x49, 0x2c, 0x9b, 0x2f, 0x01, 0xd0, 0x9c, 0x7b, 0xed, 0x21, 0xb8, 0xdb, 0x3a, 0x88, 0xd0, 0x1f, 0xfe, 0xd5, 0x26, 0x95, 0x2a, 0x1c, 0x64, 0xcd, 0x3f, 0x9d, 0xaa, 0x1a, 0x5b, 0xbf, 0x69, 0x9d, 0xd4, 0x1d, 0x45, 0xaa, 0x8b, 0x9b, 0xf6, 0x1e, 0x7c, 0x94, 0x28, 0x9d, 0x36, 0x1b, 0x6f, 0x7e, 0x6e, 0x9c, 0x0f, 0x1d, 0x8b, 0x69, 0x21, 0x9e, 0xf1, 0x1d, 0x11, 0x58, 0x87, 0xa1, 0xdb, 0x1b, 0xca, 0x47, 0xa1, 0xa3, 0x1a, 0x1a, 0xff, 0x36, 0xff, 0xa9, 0x94, 0x1a, 0xe2, 0x24, 0x90, 0xbb, 0xec, 0x1e, 0x34, 0x20, 0xb1, 0xc3, 0x61, 0x25, 0x5f, 0x23, 0x4f, 0xc5, 0x7a, 0x28, 0x72, 0x24, 0x7b, 0xc6, 0x9f, 0x2a, 0x1e, 0x25, 0x1d, 0x86, 0x29, 0x23, 0x7e, 0xdb, 0x3b, 0x90, 0x79, 0x22, 
0xd8, 0xd7, 0x95, 0x9b, 0x12, 0x21, 0xf6, 0xd4, 0xa3, 0xa6, 0x13, 0x1e, 0x75, 0xcf, 0xa5, 0xb3, 0xc8, 0x1c, 0x99, 0xc9, 0xf2, 0xba, 0x0f, 0x1c, 0xe6, 0xc1, 0xc5, 0xb3, 0x34, 0x1e, 0x0c, 0xa5, 0xf0, 0xb6, 0x58, 0x1c, 0xde, 0x92, 0xe6, 0xb4, 0x66, 0x1c, 0xe8, 0x79, 0x42, 0xb1, 0x1c, 0x1d, 0x20, 0x63, 0x2f, 0xb0, 0x4e, 0x1b, 0xeb, 0x51, 0x23, 0xaf, 0xde, 0x1a, 0x83, 0x40, 0xff, 0xbb, 0xf2, 0x12, 0x2d, 0x26, 0xdd, 0xc5, 0x67, 0x21, 0x71, 0x26, 0x5c, 0xc6, 0xee, 0x25, 0x9a, 0x26, 0x95, 0xc7, 0xbf, 0x27, 0xe7, 0x26, 0xb9, 0xc8, 0x41, 0x29, 0x5c, 0x26, 0xd0, 0x94, 0x35, 0x24, 0x87, 0xd9, 0xb8, 0x9c, 0xf5, 0x23, 0xca, 0xd6, 0xfe, 0xa8, 0x7a, 0x22, 0x13, 0xd4, 0x51, 0xba, 0x17, 0x1d, 0x65, 0xd2, 0xcf, 0xbd, 0x13, 0x1d, 0x87, 0xce, 0x1c, 0xc5, 0xce, 0x1f, 0x81, 0xc4, 0x8c, 0xc7, 0x07, 0x1e, 0xd9, 0xb5, 0x11, 0xc6, 0xf8, 0x1d, 0xf6, 0xa0, 0x07, 0xc5, 0xa4, 0x1c, 0x61, 0x85, 0x03, 0xc4, 0x50, 0x1c, 0x27, 0x71, 0xd7, 0xc4, 0x13, 0x1a, 0x34, 0x5e, 0x30, 0xc5, 0x1b, 0x1b, 0x08, 0x4e, 0xca, 0xc5, 0xde, 0x17, 0x28, 0x32, 0x6c, 0xc8, 0x5b, 0x22, 0xe0, 0x28, 0xd0, 0xc8, 0xdd, 0x25, 0xbe, 0x28, 0x67, 0xc9, 0x97, 0x2a, 0x17, 0x29, 0x3f, 0xc9, 0xd4, 0x29, 0xb0, 0x28, 0x00, 0x1d, 0x3d, 0xa9, 0x0d, 0xb2, 0xce, 0x1f, 0xd3, 0xa7, 0x79, 0xab, 0x7d, 0x1f, 0xe7, 0xa6, 0x9a, 0xa9, 0x29, 0x21, 0x53, 0xa5, 0x3b, 0xa3, 0x6e, 0x24, 0xc3, 0xa1, 0xa9, 0x9b, 0x60, 0x23, 0xce, 0xa2, 0x71, 0x95, 0x49, 0x26, 0x82, 0xa0, 0xbd, 0x90, 0x38, 0x26, 0xb6, 0xa0, 0x4d, 0x8d, 0xc8, 0x28, 0xa0, 0x96, 0x83, 0x7a, 0xa1, 0x29, 0x58, 0x9a, 0xae, 0x75, 0x11, 0x2a, 0xff, 0x91, 0x39, 0x6a, 0x56, 0x24, 0xa9, 0x9f, 0x00, 0x62, 0x40, 0x24, 0x12, 0x9e, 0x77, 0x5e, 0xa3, 0x1b, 0x73, 0x93, 0xb2, 0x27, 0x57, 0x1b, 0xbb, 0x94, 0x13, 0x21, 0xc8, 0x1c, 0xb2, 0x9c, 0x41, 0x1d, 0x7a, 0x20, 0x23, 0xa0, 0xd7, 0x1d, 0x0f, 0x1a, 0xcf, 0xa8, 0x99, 0xbf, 0xe0, 0x1c, 0x77, 0xa7, 0x65, 0xb3, 0x21, 0x1f, 0x53, 0xa5, 0x3b, 0xa9, 0x75, 0x1f, 0xbe, 0xa3, 0x9f, 0xa5, 0xdb, 0x22, 0xea, 0xa1, 0x7f, 0x9d, 0x66, 0x24, 0xf1, 0x9e, 0x18, 0x96, 0x6f, 
0x23, 0xca, 0x9e, 0xbf, 0x8f, 0x63, 0x26, 0x9c, 0x9b, 0x79, 0x8a, 0x2b, 0x28, 0x4e, 0x91, 0x7e, 0x77, 0xa4, 0x28, 0x83, 0x91, 0xeb, 0x6f, 0x5a, 0x28, 0x6f, 0x8d, 0xd8, 0x64, 0xfc, 0x22, 0xa5, 0x95, 0xb7, 0x5c, 0x19, 0x20, 0x04, 0x90, 0x2b, 0x44, 0x9b, 0x1e, 0x6c, 0x8a, 0x75, 0x25, 0x06, 0x1d, 0x31, 0x90, 0xe1, 0x1e, 0x62, 0x1e, 0x51, 0x91, 0xc0, 0x19, 0xe5, 0x20, 0x84, 0x99, 0x75, 0x18, 0xbe, 0x1a, 0x2c, 0xa7, 0x50, 0xc0, 0xbf, 0x19, 0xd3, 0xa6, 0x49, 0xbd, 0xc0, 0x19, 0xe4, 0xa4, 0xfc, 0xb4, 0xc9, 0x1c, 0x26, 0xa2, 0x7c, 0xa7, 0x68, 0x20, 0xbe, 0x9e, 0x6d, 0x9f, 0xf0, 0x24, 0x68, 0x9b, 0x5f, 0x95, 0x4d, 0x24, 0xe7, 0x98, 0x21, 0x8e, 0x62, 0x27, 0x87, 0x92, 0xea, 0x82, 0xa4, 0x29, 0xf8, 0x89, 0x5d, 0x73, 0x6c, 0x28, 0x1e, 0x8b, 0x8a, 0x67, 0xac, 0x28, 0x94, 0x8a, 0x2d, 0x60, 0x15, 0x20, 0xec, 0x85, 0xac, 0x49, 0x95, 0x20, 0xe2, 0x83, 0x34, 0x29, 0xf1, 0x21, 0xd6, 0x88, 0x4d, 0x23, 0x67, 0x23, 0x0c, 0x89, 0x0f, 0x1e, 0x6c, 0x20, 0x91, 0x8c, 0x52, 0x14, 0x9e, 0x22, 0xa5, 0x8f, 0xc6, 0x13, 0x5a, 0x13, 0x25, 0xa7, 0x50, 0xd2, 0xe0, 0x14, 0x48, 0xa4, 0x11, 0xc1, 0x45, 0x17, 0xf5, 0xa1, 0xe8, 0xb9, 0xae, 0x17, 0x48, 0x9e, 0x79, 0xae, 0xda, 0x1c, 0x55, 0x9a, 0xd5, 0xa0, 0xb2, 0x22, 0xcb, 0x94, 0x18, 0x93, 0x9f, 0x23, 0x0e, 0x8d, 0x8c, 0x86, 0x0b, 0x25, 0x91, 0x82, 0x69, 0x74, 0xf0, 0x28, 0xd9, 0x83, 0x1a, 0x6d, 0x23, 0x2a, 0x1b, 0x80, 0x9f, 0x60, 0x3c, 0x27, 0xd1, 0x7d, 0x5a, 0x4e, 0x02, 0x20, 0xa2, 0x79, 0xa9, 0x38, 0x2f, 0x23, 0x77, 0x7c, 0x67, 0x25, 0xf7, 0x25, 0x78, 0x81, 0x4a, 0x1f, 0x70, 0x26, 0x55, 0x85, 0x94, 0x19, 0xfe, 0x27, 0xcb, 0x87, 0x3c, 0x17, 0x8c, 0x45, 0x19, 0x93, 0x3c, 0x11, 0x38, 0x14, 0xe7, 0x9f, 0xc8, 0xd2, 0xb8, 0x11, 0x14, 0xa2, 0x0d, 0xce, 0xdf, 0x12, 0x90, 0x9b, 0xaa, 0xbd, 0xb1, 0x11, 0xed, 0x98, 0x71, 0xb1, 0xc7, 0x1b, 0x79, 0x8d, 0x91, 0x9b, 0xbb, 0x1c, 0xcb, 0x86, 0xc9, 0x8b, 0xc7, 0x21, 0xe7, 0x7d, 0x12, 0x79, 0x61, 0x26, 0xc3, 0x79, 0x7d, 0x6e, 0x41, 0x28, 0xa7, 0x76, 0xff, 0x62, 0x7d, 0x28, 0x70, 0x73, 0x4d, 0x52, 0x4f, 0x25, 0x7c, 0x70, 
0x38, 0x3d, 0xf2, 0x24, 0xf1, 0x6e, 0x7a, 0x2a, 0x29, 0x29, 0x49, 0x73, 0x9b, 0x20, 0xa7, 0x2d, 0xdb, 0x7a, 0x0a, 0x1b, 0xbd, 0x31, 0xaa, 0x80, 0xe3, 0x19, 0xaa, 0x54, 0x51, 0x8e, 0xdc, 0x14, 0xb8, 0x60, 0xc0, 0x94, 0x1d, 0x15, 0x7e, 0x13, 0x44, 0x96, 0x4f, 0xd4, 0x94, 0x0f, 0x8e, 0x98, 0x68, 0xcf, 0xcb, 0x0f, 0xf9, 0x95, 0xa0, 0xc6, 0x1b, 0x10, 0xd8, 0x8b, 0x6d, 0xad, 0x3e, 0x14, 0xe2, 0x83, 0x8b, 0x98, 0xd2, 0x1d, 0xbd, 0x7a, 0xd4, 0x85, 0x05, 0x27, 0x44, 0x76, 0x02, 0x77, 0xb5, 0x2c, 0x46, 0x71, 0xc0, 0x6a, 0x1c, 0x2f, 0x86, 0x6d, 0xe2, 0x5b, 0xd8, 0x2f, 0xad, 0x6a, 0x01, 0x4a, 0x15, 0x2f, 0x25, 0x67, 0xce, 0x36, 0x28, 0x30, 0x89, 0x67, 0x2e, 0x22, 0x3b, 0x38, 0xb3, 0x6c, 0x50, 0x18, 0xb3, 0x46, 0x63, 0x75, 0xe0, 0x19, 0x35, 0x55, 0xcd, 0x81, 0x15, 0x18, 0x29, 0x6b, 0x35, 0x8e, 0xdc, 0x17, 0x4c, 0x6f, 0x61, 0x97, 0xaa, 0x18, 0xc2, 0x12, 0xc0, 0x8b, 0x98, 0xd7, 0x1c, 0x0e, 0xf0, 0x8d, 0x6d, 0xd2, 0x88, 0x09, 0x33, 0x8b, 0xfe, 0xc8, 0x74, 0x0b, 0x82, 0x82, 0x53, 0xaf, 0x0a, 0x1a, 0x5b, 0x77, 0x5c, 0x94, 0x94, 0x25, 0x3d, 0x71, 0x06, 0x82, 0xd8, 0x2d, 0x13, 0x6c, 0x00, 0x74, 0x8b, 0x33, 0x71, 0x67, 0x5e, 0x64, 0xe8, 0x37, 0x8e, 0x63, 0x76, 0x56, 0x46, 0x38, 0xc6, 0x60, 0xa5, 0x44, 0x81, 0x3b, 0xac, 0x60, 0x77, 0x31, 0xc3, 0x3f, 0x5d, 0x61, 0x85, 0x20, 0x67, 0x48, 0x6d, 0x67, 0xff, 0x17, 0xb5, 0x57, 0xcf, 0x74, 0x2a, 0x18, 0xf4, 0x64, 0x9f, 0x7f, 0xf0, 0x18, 0x9b, 0x71, 0xf3, 0x8c, 0x0e, 0x19, 0x6d, 0x7e, 0x92, 0x96, 0xb2, 0x1b, 0x0a, 0x0f, 0x7b, 0x7c, 0x5f, 0xdc, 0x25, 0x0e, 0xe1, 0x80, 0xef, 0xd6, 0x06, 0x07, 0x11, 0x7f, 0x2a, 0xc9, 0xba, 0x0c, 0xef, 0x73, 0x39, 0xad, 0xe9, 0x23, 0x64, 0x6c, 0xbb, 0x94, 0x85, 0x2d, 0x69, 0x66, 0xb1, 0x82, 0xb7, 0x36, 0x99, 0x61, 0x34, 0x72, 0x16, 0x3d, 0x7b, 0x5d, 0x31, 0x62, 0x12, 0x42, 0x76, 0x5a, 0x69, 0x52, 0x99, 0x47, 0x2b, 0x59, 0xfb, 0x42, 0x13, 0x4b, 0x47, 0x5b, 0xa6, 0x32, 0x0d, 0x4f, 0x2e, 0x5e, 0x84, 0x22, 0x6a, 0x56, 0x08, 0x63, 0xc4, 0x16, 0xe9, 0x65, 0x37, 0x70, 0xcb, 0x19, 0x60, 0x71, 0xa2, 0x7c, 0x5e, 0x19, 0xda, 
0x7d, 0xe3, 0x86, 0xfc, 0x19, 0xe3, 0x8a, 0x25, 0x94, 0xb4, 0x1c, 0x85, 0x17, 0x41, 0x6d, 0xb8, 0xdb, 0x8d, 0x10, 0xfa, 0x70, 0x73, 0xd9, 0x2a, 0x00, 0x00, 0x6f, 0x82, 0xd1, 0x73, 0x16, 0x5e, 0x69, 0x45, 0xb1, 0xe2, 0x2a, 0xa2, 0x61, 0x27, 0x97, 0x49, 0x38, 0x85, 0x5b, 0x6b, 0x81, 0x47, 0x42, 0xa1, 0x57, 0x09, 0x70, 0x9c, 0x4a, 0xa9, 0x54, 0x14, 0x61, 0x54, 0x51, 0x08, 0x52, 0x03, 0x52, 0x2d, 0x55, 0xc5, 0x53, 0xf0, 0x42, 0xd3, 0x59, 0xc6, 0x56, 0xf2, 0x33, 0x9b, 0x5e, 0x00, 0x5b, 0x26, 0x25, 0xad, 0x61, 0xc2, 0x5f, 0xec, 0x18, 0x8d, 0x6f, 0x6d, 0x6c, 0x47, 0x19, 0xd8, 0x7c, 0x6d, 0x79, 0x0e, 0x1b, 0x54, 0x88, 0x10, 0x82, 0x47, 0x1b, 0x6d, 0x96, 0x48, 0x90, 0x7d, 0x1e, 0x71, 0x1d, 0x61, 0x5b, 0x89, 0xdb, 0x6b, 0x19, 0xc1, 0x5d, 0xde, 0xda, 0x2c, 0x0e, 0xbe, 0x60, 0x86, 0xd4, 0x2b, 0x25, 0x87, 0x59, 0x8d, 0xb3, 0xde, 0x39, 0x12, 0x54, 0xa6, 0x95, 0x04, 0x47, 0xde, 0x50, 0x76, 0x81, 0x65, 0x51, 0x7e, 0x4d, 0x94, 0x71, 0x09, 0x58, 0xc6, 0x4b, 0xfd, 0x62, 0x0e, 0x5e, 0x2a, 0x4b, 0x47, 0x52, 0xa3, 0x62, 0xb2, 0x4b, 0x6d, 0x43, 0x23, 0x66, 0xad, 0x4e, 0xd4, 0x34, 0xeb, 0x6a, 0x49, 0x52, 0x4f, 0x27, 0x40, 0x6d, 0x9f, 0x58, 0x6e, 0x1b, 0x0a, 0x7a, 0x25, 0x64, 0x28, 0x1b, 0x4f, 0x89, 0x3b, 0x72, 0xde, 0x1d, 0xe4, 0x93, 0x8a, 0x7c, 0x6b, 0x1e, 0x79, 0x9d, 0x13, 0x85, 0x29, 0x1e, 0xc7, 0x2b, 0xb0, 0x30, 0x28, 0xd1, 0x36, 0x2a, 0xef, 0x31, 0xd4, 0xd1, 0xc5, 0x20, 0xba, 0x4a, 0xbc, 0xd9, 0x54, 0x34, 0xfc, 0x4b, 0x22, 0xb6, 0x6c, 0x4b, 0x9a, 0x48, 0x2c, 0x95, 0x61, 0x58, 0xad, 0x46, 0x06, 0x82, 0x9c, 0x60, 0xc9, 0x44, 0x8c, 0x73, 0x79, 0x65, 0x53, 0x43, 0x59, 0x65, 0x86, 0x69, 0xff, 0x41, 0x6b, 0x54, 0xbd, 0x6e, 0x40, 0x40, 0xe4, 0x44, 0x88, 0x72, 0x25, 0x43, 0x54, 0x35, 0x85, 0x75, 0x45, 0x46, 0x91, 0x28, 0x0d, 0x78, 0xa7, 0x4c, 0xae, 0x1a, 0xaf, 0x85, 0x9b, 0x5a, 0x3a, 0x1c, 0xd7, 0x93, 0x66, 0x68, 0x87, 0x1f, 0xa9, 0x9c, 0x67, 0x74, 0x23, 0x1f, 0x29, 0xa6, 0xb0, 0x7b, 0x9a, 0x1f, 0x83, 0x2b, 0xc6, 0x30, 0x19, 0xd1, 0x3c, 0x2b, 0xd3, 0x30, 0x23, 0xd1, 0x49, 0x39, 0x52, 0x37, 
0x10, 0xe0, 0xc4, 0x4c, 0xb3, 0x38, 0x23, 0xbe, 0x99, 0x5f, 0xdd, 0x3b, 0xa8, 0x9a, 0xb7, 0x69, 0x52, 0x3b, 0xf8, 0x87, 0x10, 0x6d, 0x9f, 0x3a, 0xc4, 0x77, 0xfe, 0x71, 0x90, 0x38, 0xff, 0x69, 0x4e, 0x75, 0xf6, 0x36, 0x82, 0x58, 0x20, 0x79, 0x37, 0x34, 0xa7, 0x46, 0xb7, 0x7c, 0x33, 0x34, 0x0f, 0x35, 0x9e, 0x7f, 0x4c, 0x36, 0xce, 0x27, 0x75, 0x81, 0xca, 0x3a, 0x48, 0x19, 0x2d, 0x90, 0x2b, 0x4b, 0xe8, 0x1d, 0x4a, 0x99, 0xd5, 0x5b, 0x39, 0x20, 0x58, 0xa2, 0xee, 0x65, 0xad, 0x1f, 0x8c, 0xad, 0x2a, 0x72, 0xc1, 0x1d, 0x84, 0x2b, 0x8b, 0x30, 0x72, 0xcd, 0xab, 0x2c, 0x9d, 0x2f, 0x23, 0xd0, 0xae, 0x53, 0x42, 0x25, 0xe6, 0xe9, 0x5a, 0x66, 0x89, 0x29, 0xee, 0xc7, 0xe4, 0x74, 0x1d, 0x30, 0x7c, 0xa4, 0x87, 0x79, 0xc2, 0x32, 0x7f, 0x8f, 0xe0, 0x7c, 0x00, 0x31, 0x2b, 0x7f, 0xb0, 0x80, 0x16, 0x2e, 0x3f, 0x6e, 0xcc, 0x82, 0xc7, 0x2b, 0xd6, 0x5c, 0xbf, 0x84, 0xd1, 0x29, 0x4d, 0x4a, 0xd3, 0x87, 0x19, 0x26, 0xdb, 0x39, 0x04, 0x89, 0x2e, 0x25, 0xb9, 0x27, 0xcb, 0x8b, 0xd1, 0x28, 0x3f, 0x18, 0x71, 0x98, 0x86, 0x3b, 0x11, 0x1a, 0x13, 0xa5, 0x32, 0x4d, 0x46, 0x1d, 0x76, 0xac, 0x78, 0x55, 0x8e, 0x1d, 0x34, 0xae, 0xd8, 0x63, 0x08, 0x1d, 0x85, 0x2c, 0x91, 0x2f, 0x18, 0xd0, 0xa2, 0x2c, 0xcb, 0x2f, 0x05, 0xd0, 0xbc, 0x76, 0x57, 0x22, 0xe8, 0xdd, 0xdf, 0x81, 0x11, 0x1e, 0xbb, 0xd0, 0x5f, 0x8a, 0x80, 0x24, 0xe7, 0xb1, 0xfe, 0x8e, 0x1e, 0x28, 0x41, 0x99, 0xfa, 0x8c, 0xdf, 0x25, 0xa8, 0x89, 0x85, 0x8f, 0x5e, 0x23, 0x9a, 0x77, 0x86, 0x8f, 0xd1, 0x20, 0xd7, 0x62, 0x16, 0x91, 0x19, 0x1e, 0x55, 0x50, 0x03, 0x91, 0x64, 0x1c, 0x09, 0x3e, 0x7a, 0x97, 0xa0, 0x1d, 0xaf, 0x2f, 0x4d, 0x9e, 0x8e, 0x1f, 0x83, 0x21, 0xd2, 0xab, 0x19, 0x26, 0xfd, 0x1e, 0xf8, 0xb7, 0x22, 0x36, 0xf8, 0x1b, 0x7e, 0xbf, 0x74, 0x44, 0x0b, 0x1b, 0xd8, 0xc3, 0x8f, 0x55, 0x6a, 0x1e, 0x0c, 0x2c, 0xa8, 0x2f, 0x09, 0xd0, 0xa9, 0x82, 0x30, 0x24, 0x4f, 0xdd, 0x8d, 0x8f, 0x5d, 0x23, 0x36, 0xd8, 0x69, 0x9d, 0x19, 0x20, 0xc4, 0xd2, 0x62, 0xa4, 0x96, 0x1a, 0xe4, 0xc5, 0xa2, 0xa3, 0x08, 0x1d, 0xe3, 0xad, 0xd9, 0xa4, 0x56, 0x1d, 0x25, 0x99, 0xf7, 
0xa3, 0x99, 0x1c, 0x23, 0x82, 0x3d, 0xa4, 0x94, 0x1d, 0x59, 0x6d, 0xe4, 0xa4, 0xcf, 0x1d, 0x4e, 0x5b, 0xa7, 0xa5, 0x7c, 0x1c, 0xc9, 0x4a, 0x6a, 0xaa, 0x9a, 0x19, 0xe7, 0x39, 0xe3, 0xaf, 0x96, 0x18, 0xc8, 0x25, 0x2c, 0xc1, 0x4d, 0x20, 0xcc, 0x22, 0x98, 0xc5, 0x2a, 0x27, 0x07, 0x24, 0x8a, 0xc6, 0xbb, 0x29, 0x9b, 0x25, 0x57, 0xc6, 0xc3, 0x43, 0x89, 0x21, 0xa8, 0x88, 0xa4, 0x25, 0x13, 0xdc, 0xf9, 0x94, 0x2d, 0x25, 0x30, 0xda, 0xb1, 0x9f, 0x80, 0x24, 0x4f, 0xd6, 0xf0, 0xa9, 0x94, 0x21, 0x42, 0xd3, 0x5c, 0xb8, 0x06, 0x1c, 0xc7, 0xce, 0x9f, 0xbf, 0x75, 0x1d, 0x4c, 0xc4, 0xda, 0xbe, 0xa1, 0x1e, 0x60, 0xad, 0x89, 0xc0, 0x27, 0x1c, 0xac, 0x97, 0x9a, 0xc0, 0x36, 0x1b, 0xfe, 0x7f, 0x5f, 0xbe, 0x21, 0x1b, 0xfe, 0x6b, 0x46, 0xbe, 0x3c, 0x1a, 0x26, 0x58, 0x11, 0xbc, 0x8e, 0x17, 0x31, 0x43, 0x65, 0xc2, 0x5a, 0x13, 0xdb, 0x29, 0x32, 0xc7, 0x31, 0x23, 0x14, 0x27, 0x9a, 0xc8, 0x30, 0x26, 0xc3, 0x27, 0x74, 0xc8, 0xb7, 0x28, 0xcd, 0x27, 0x64, 0xc8, 0x30, 0x2d, 0xd3, 0x29, 0xef, 0x98, 0xc1, 0x25, 0xdf, 0xda, 0xcb, 0x9e, 0xd1, 0x25, 0x2f, 0xd8, 0xb1, 0xaa, 0x96, 0x23, 0xc6, 0xd6, 0x7a, 0xbd, 0xc8, 0x1e, 0xcb, 0xd7, 0x64, 0xc2, 0xc6, 0x1d, 0xe3, 0xd1, 0xcf, 0xc8, 0x75, 0x1f, 0xd8, 0xc7, 0x20, 0xc9, 0x99, 0x1f, 0x1c, 0xb6, 0xc3, 0xc9, 0xbe, 0x1e, 0x21, 0xa1, 0xa9, 0xc8, 0x49, 0x1c, 0x84, 0x86, 0x37, 0xc6, 0xf4, 0x1c, 0x24, 0x72, 0xe0, 0xc7, 0x1c, 0x1c, 0x66, 0x60, 0x22, 0xc7, 0xe2, 0x1d, 0x19, 0x50, 0x6e, 0xc9, 0x92, 0x19, 0xc5, 0x34, 0x4d, 0xca, 0x29, 0x24, 0xf6, 0x29, 0xb7, 0xca, 0x50, 0x27, 0x84, 0x29, 0x16, 0xca, 0x66, 0x29, 0x27, 0x28, 0xb5, 0xca, 0x74, 0x2a, 0x48, 0x28, 0x73, 0x1d, 0xc6, 0xaa, 0xa5, 0xb4, 0xea, 0x20, 0x32, 0xa9, 0x5f, 0xad, 0x74, 0x20, 0x4f, 0xa8, 0xc4, 0xab, 0x67, 0x21, 0x2e, 0xa8, 0xc7, 0xa8, 0x57, 0x24, 0x86, 0xa4, 0x20, 0x9d, 0xc6, 0x23, 0xa4, 0xa4, 0xc0, 0x97, 0x87, 0x26, 0xc4, 0xa3, 0x41, 0x92, 0x4e, 0x26, 0xf4, 0xa2, 0xd4, 0x8f, 0xdb, 0x28, 0x30, 0xa3, 0x71, 0x85, 0xc8, 0x29, 0xac, 0xa3, 0x23, 0x7d, 0x2c, 0x28, 0x81, 0x9a, 0xfd, 0x6f, 0xb5, 0x25, 0x15, 0xa1, 
0x0d, 0x63, 0x6e, 0x24, 0x7f, 0xa0, 0x84, 0x5f, 0xcc, 0x15, 0x21, 0x97, 0x73, 0x23, 0xb4, 0x1a, 0xe8, 0x9f, 0x47, 0x20, 0xe5, 0x20, 0x26, 0xa6, 0xdc, 0x20, 0xa2, 0x23, 0xad, 0xb3, 0x71, 0x21, 0x25, 0x1b, 0x95, 0xaa, 0x6e, 0xc1, 0xe6, 0x1d, 0x2c, 0xa9, 0x6f, 0xb5, 0xb5, 0x1f, 0xd3, 0xa7, 0xb9, 0xac, 0x0b, 0x1f, 0xee, 0xa6, 0x93, 0xa8, 0xf2, 0x22, 0x6e, 0xa4, 0x8d, 0xa0, 0x7f, 0x24, 0xad, 0xa0, 0xeb, 0x99, 0x29, 0x23, 0xa0, 0xa1, 0x51, 0x91, 0xe2, 0x26, 0x79, 0x9e, 0x4b, 0x8c, 0x9c, 0x28, 0x8d, 0x95, 0x89, 0x7a, 0xa6, 0x28, 0xcf, 0x95, 0xa3, 0x71, 0xb9, 0x24, 0xf6, 0x8e, 0xae, 0x65, 0x79, 0x23, 0xc3, 0x9b, 0xf4, 0x5e, 0xc4, 0x16, 0x79, 0x98, 0x30, 0x3d, 0x92, 0x1b, 0xda, 0x92, 0xd8, 0x24, 0xf6, 0x19, 0x0f, 0x93, 0x84, 0x1a, 0xfd, 0x1f, 0xbc, 0x9d, 0x8b, 0x1b, 0xb2, 0x23, 0x69, 0xa3, 0x01, 0x1b, 0xfc, 0x1b, 0x10, 0xa9, 0x63, 0xc3, 0x0a, 0x1a, 0xd5, 0xa8, 0xaf, 0xc0, 0x67, 0x1a, 0xf5, 0xa7, 0xcd, 0xb8, 0x26, 0x1c, 0x79, 0xa5, 0xb0, 0xab, 0x54, 0x20, 0x30, 0xa1, 0xfa, 0xa3, 0xb6, 0x24, 0x09, 0x9e, 0xae, 0x98, 0x9b, 0x24, 0xb8, 0x9a, 0xee, 0x91, 0x20, 0x27, 0x53, 0x96, 0x4f, 0x85, 0xd2, 0x29, 0x57, 0x8b, 0xcb, 0x75, 0x90, 0x28, 0x02, 0x8d, 0x6d, 0x69, 0xe2, 0x23, 0xc1, 0x8a, 0xbd, 0x5e, 0x65, 0x20, 0xe9, 0x88, 0xb8, 0x4a, 0xd7, 0x20, 0x1d, 0x89, 0xbc, 0x2f, 0x42, 0x1f, 0x9a, 0x87, 0xe1, 0x21, 0x3a, 0x1f, 0x0b, 0x8e, 0x4c, 0x18, 0x93, 0x21, 0x81, 0x90, 0xf8, 0x15, 0x7a, 0x24, 0xbe, 0x9a, 0xd5, 0x16, 0xae, 0x14, 0xba, 0xa9, 0xae, 0xd5, 0x96, 0x11, 0x8a, 0xac, 0x22, 0xd2, 0xcb, 0x19, 0x66, 0xa5, 0x60, 0xbd, 0x8f, 0x18, 0xf8, 0xa2, 0xee, 0xb4, 0x03, 0x1b, 0x95, 0x9e, 0x8a, 0xa5, 0x80, 0x22, 0x61, 0x97, 0x86, 0x97, 0x3f, 0x26, 0x95, 0x8d, 0x14, 0x88, 0x75, 0x27, 0xcd, 0x85, 0xd8, 0x78, 0xf9, 0x29, 0xe9, 0x83, 0x1a, 0x6d, 0xd1, 0x28, 0xaa, 0x83, 0x2a, 0x62, 0xdf, 0x27, 0x59, 0x7e, 0xf7, 0x50, 0x3c, 0x20, 0xf2, 0x7d, 0x5e, 0x3a, 0xa5, 0x22, 0xa3, 0x7f, 0x56, 0x26, 0x75, 0x24, 0xd3, 0x85, 0x2a, 0x1d, 0x42, 0x26, 0xc9, 0x87, 0x33, 0x18, 0x1d, 0x26, 0x60, 0x8a, 0x0c, 0x10, 0x03, 
0x46, 0x08, 0x95, 0xef, 0x10, 0xca, 0x16, 0x77, 0xa3, 0x7c, 0xd5, 0xb1, 0x13, 0x52, 0xa5, 0xfb, 0xd2, 0x98, 0x10, 0x08, 0xa3, 0x97, 0xcb, 0x50, 0x12, 0x1a, 0x9d, 0x8f, 0xb7, 0xcc, 0x17, 0x51, 0x95, 0x1f, 0xa5, 0x67, 0x1d, 0x50, 0x89, 0xa7, 0x8f, 0x10, 0x25, 0xf6, 0x85, 0x23, 0x81, 0xf9, 0x2b, 0xb3, 0x82, 0x3b, 0x76, 0xd5, 0x2e, 0x18, 0x7e, 0x5f, 0x69, 0x19, 0x2e, 0xdd, 0x7b, 0x0b, 0x59, 0xd6, 0x2d, 0x11, 0x78, 0x3e, 0x46, 0xf4, 0x2a, 0x5f, 0x76, 0x1e, 0x31, 0xc7, 0x2c, 0x5a, 0x74, 0x22, 0x1d, 0x52, 0x32, 0xfa, 0x7c, 0xc8, 0x1a, 0x7e, 0x44, 0x7e, 0x85, 0xd0, 0x13, 0x40, 0x56, 0xec, 0x91, 0x5f, 0x14, 0xaf, 0x6b, 0x3c, 0x9d, 0xca, 0x16, 0x23, 0x15, 0x7a, 0x99, 0xdb, 0xd7, 0x60, 0x12, 0x74, 0x9c, 0x96, 0xd3, 0xad, 0x10, 0x87, 0x9a, 0xf0, 0xcb, 0xc7, 0x12, 0x28, 0x92, 0xec, 0xb8, 0x6d, 0x15, 0x63, 0x8a, 0x0a, 0xa1, 0x5e, 0x23, 0xf9, 0x83, 0x68, 0x8e, 0xf3, 0x2c, 0x46, 0x7e, 0x37, 0x7f, 0xd2, 0x32, 0x1e, 0x79, 0x0e, 0x71, 0xe6, 0x35, 0x1c, 0x75, 0x2b, 0x63, 0x5d, 0x36, 0x2a, 0x71, 0xe3, 0x52, 0xa5, 0x36, 0x27, 0x6f, 0xc2, 0x3e, 0x30, 0x39, 0x2a, 0x70, 0x07, 0x2c, 0x45, 0x40, 0x06, 0x6f, 0x68, 0x1b, 0x2e, 0x4a, 0xb8, 0x7a, 0x42, 0x18, 0xa2, 0x5a, 0x97, 0x81, 0xcd, 0x16, 0x73, 0x6d, 0xda, 0x91, 0xc6, 0x17, 0x75, 0x71, 0x8d, 0x99, 0x48, 0x19, 0x01, 0x15, 0x6e, 0x8f, 0x96, 0xda, 0x37, 0x12, 0x85, 0x92, 0x16, 0xd6, 0xda, 0x0d, 0xe1, 0x91, 0x05, 0xcd, 0xc4, 0x0a, 0x33, 0x88, 0xda, 0xb8, 0x05, 0x21, 0x3a, 0x7f, 0xf2, 0x9f, 0x35, 0x2c, 0x03, 0x79, 0x7b, 0x8d, 0x89, 0x34, 0x8a, 0x74, 0x08, 0x7c, 0xd7, 0x39, 0x6b, 0x6f, 0x30, 0x6c, 0xe5, 0x3e, 0x3b, 0x6b, 0x6a, 0x5e, 0x19, 0x40, 0xea, 0x68, 0xe2, 0x4d, 0x2d, 0x43, 0xd3, 0x68, 0xbf, 0x3a, 0x25, 0x49, 0x58, 0x6b, 0x24, 0x2a, 0x75, 0x4e, 0x34, 0x6d, 0xef, 0x1b, 0xe8, 0x5b, 0x0f, 0x77, 0xdd, 0x19, 0x1f, 0x67, 0x4b, 0x82, 0x97, 0x18, 0xb5, 0x77, 0xf0, 0x91, 0x46, 0x1a, 0x4c, 0x80, 0x88, 0x98, 0x17, 0x1b, 0x47, 0x13, 0x1d, 0x80, 0xfa, 0xdf, 0x7b, 0x13, 0x6f, 0x86, 0x20, 0xda, 0xa3, 0x0c, 0xe0, 0x86, 0x44, 0xd2, 0x1d, 0x15, 0xa0, 0x7e, 
0x0d, 0xb9, 0xab, 0x28, 0x25, 0x75, 0xe7, 0xa0, 0xbd, 0x34, 0xc5, 0x6f, 0x1f, 0x8c, 0xc4, 0x3d, 0xea, 0x69, 0x97, 0x7b, 0xa3, 0x45, 0xcb, 0x65, 0x9d, 0x6b, 0x27, 0x4b, 0x09, 0x63, 0x26, 0x5b, 0x76, 0x50, 0x03, 0x62, 0xaa, 0x4b, 0x22, 0x54, 0x47, 0x64, 0x36, 0x3b, 0x15, 0x58, 0x74, 0x67, 0x79, 0x2c, 0x1f, 0x5d, 0x1a, 0x6a, 0xce, 0x1d, 0xb3, 0x67, 0x71, 0x74, 0x69, 0x19, 0x40, 0x74, 0x30, 0x7f, 0x73, 0x19, 0xe4, 0x81, 0xbe, 0x8b, 0x51, 0x1a, 0x7d, 0x8e, 0xeb, 0x99, 0x08, 0x1d, 0x25, 0x19, 0xe0, 0x72, 0x45, 0xdf, 0x46, 0x15, 0xb1, 0x76, 0x13, 0xde, 0xa8, 0x0a, 0xcb, 0x78, 0xc9, 0xdc, 0xa2, 0x21, 0x8c, 0x72, 0x23, 0xbc, 0x64, 0x34, 0x0a, 0x6a, 0x43, 0xa0, 0xcc, 0x42, 0x01, 0x63, 0xa9, 0x8b, 0x90, 0x4c, 0x29, 0x5f, 0x6c, 0x7a, 0xb2, 0x53, 0xf3, 0x5d, 0x03, 0x6a, 0xb4, 0x5a, 0x2b, 0x5b, 0x22, 0x5b, 0x4e, 0x5e, 0xaa, 0x5c, 0xc5, 0x4b, 0xcb, 0x62, 0x7c, 0x5f, 0x97, 0x3c, 0x97, 0x66, 0x5a, 0x63, 0x18, 0x2e, 0x13, 0x6a, 0x4f, 0x67, 0x28, 0x20, 0x6b, 0x72, 0xa8, 0x6f, 0xe9, 0x1a, 0x24, 0x7f, 0xca, 0x7c, 0x5a, 0x1b, 0xb7, 0x8b, 0xb8, 0x86, 0x98, 0x1b, 0xf0, 0x98, 0x8d, 0x94, 0x26, 0x1f, 0x06, 0x1f, 0xcf, 0x60, 0xa5, 0xde, 0xd7, 0x1f, 0xc2, 0x64, 0x5e, 0xde, 0x6b, 0x1a, 0x7c, 0x69, 0xc4, 0xdf, 0xae, 0x30, 0x56, 0x64, 0x26, 0xbf, 0x17, 0x43, 0xfb, 0x5d, 0xaa, 0xa0, 0xb5, 0x52, 0x20, 0x59, 0x3b, 0x8c, 0x0b, 0x5b, 0xab, 0x56, 0xa7, 0x7a, 0xf6, 0x62, 0x3e, 0x54, 0xdd, 0x6b, 0x64, 0x67, 0x1c, 0x54, 0x42, 0x5b, 0xbe, 0x6b, 0x83, 0x54, 0x82, 0x4b, 0xf1, 0x6f, 0x6d, 0x57, 0xd6, 0x3d, 0x66, 0x73, 0x3e, 0x5b, 0xe1, 0x2f, 0xb2, 0x76, 0x3e, 0x60, 0x40, 0x22, 0x74, 0x7e, 0x3f, 0x69, 0x00, 0x1c, 0x4e, 0x8b, 0xd5, 0x75, 0x43, 0x1d, 0x9e, 0x96, 0xe9, 0x7e, 0x98, 0x1d, 0x8b, 0xa2, 0xfd, 0x8c, 0x1c, 0x1f, 0xf3, 0x2b, 0xbc, 0x30, 0x33, 0xd1, 0x42, 0x2b, 0x03, 0x36, 0x32, 0xd3, 0xc6, 0x2d, 0x02, 0x56, 0x2e, 0xe5, 0x2d, 0x42, 0x5e, 0x54, 0x9b, 0xc2, 0x30, 0x56, 0xe7, 0x51, 0x16, 0xa1, 0x22, 0x62, 0xca, 0x4f, 0x48, 0x8d, 0x1f, 0x6a, 0xa1, 0x4e, 0x0a, 0x7d, 0x1b, 0x6e, 0xbe, 0x4c, 0xf8, 0x6e, 0xb2, 
0x73, 0x3c, 0x4b, 0x61, 0x5d, 0x55, 0x77, 0x8f, 0x4a, 0x5d, 0x4c, 0xf6, 0x7b, 0x6c, 0x4d, 0x13, 0x3d, 0xb2, 0x7e, 0xcb, 0x51, 0x0e, 0x30, 0x86, 0x81, 0xe8, 0x55, 0xdc, 0x22, 0xca, 0x89, 0x28, 0x5e, 0x43, 0x1d, 0x65, 0x96, 0x83, 0x6d, 0x3a, 0x20, 0x54, 0xa1, 0x61, 0x75, 0xa8, 0x1f, 0x20, 0xab, 0x82, 0x7f, 0xbe, 0x1f, 0xbe, 0x2b, 0xd3, 0x30, 0x24, 0xd1, 0x49, 0x2b, 0xd0, 0x30, 0x30, 0xd1, 0x80, 0x48, 0xf7, 0x40, 0xe8, 0xed, 0xe8, 0x5b, 0x09, 0x42, 0xb0, 0xc9, 0x0b, 0x6b, 0x14, 0x44, 0xfc, 0xa4, 0xe2, 0x73, 0x16, 0x45, 0x89, 0x91, 0x08, 0x77, 0x49, 0x44, 0xaa, 0x81, 0x2e, 0x7b, 0x24, 0x43, 0x76, 0x71, 0xbe, 0x7f, 0x9b, 0x40, 0x87, 0x5f, 0xf1, 0x82, 0xe5, 0x3f, 0x0d, 0x4f, 0x35, 0x86, 0x04, 0x3e, 0xed, 0x3e, 0xa6, 0x89, 0x19, 0x42, 0x09, 0x30, 0x15, 0x8b, 0xd0, 0x45, 0x20, 0x21, 0xd0, 0x94, 0x00, 0x52, 0x1f, 0x1e, 0x80, 0x9f, 0x8e, 0x5f, 0xa8, 0x20, 0x32, 0xab, 0x58, 0x6c, 0x12, 0x1d, 0x5f, 0xae, 0x8e, 0x74, 0x7c, 0x1d, 0xa9, 0x2c, 0x86, 0x2f, 0x31, 0xd0, 0xa7, 0x2c, 0xb4, 0x2f, 0x37, 0xd0, 0xc6, 0x63, 0xe0, 0x2f, 0xa7, 0xf6, 0x39, 0x75, 0xd6, 0x33, 0x69, 0xd2, 0x11, 0x7f, 0xf7, 0x38, 0x63, 0xaf, 0x92, 0x83, 0x51, 0x3a, 0xe0, 0x99, 0x7d, 0x85, 0x86, 0x3a, 0x52, 0x88, 0x2d, 0x89, 0x72, 0x38, 0x15, 0x76, 0xa1, 0x8c, 0x90, 0x35, 0x77, 0x64, 0x6f, 0x8e, 0xda, 0x33, 0xc4, 0x53, 0x5c, 0x91, 0x0f, 0x31, 0x43, 0x42, 0x31, 0x93, 0x7f, 0x30, 0x30, 0x30, 0x86, 0x96, 0x31, 0x33, 0x38, 0x21, 0x2e, 0x9e, 0x3c, 0x3e, 0x7c, 0x1a, 0x7a, 0xaa, 0xc8, 0x50, 0x78, 0x1c, 0xfe, 0xae, 0x69, 0x59, 0xbe, 0x1d, 0x33, 0xb5, 0xac, 0x66, 0xa3, 0x1b, 0x71, 0x2c, 0x9d, 0x2f, 0x23, 0xd0, 0xae, 0x6f, 0xbb, 0x28, 0xf8, 0xe7, 0x4c, 0x7e, 0x9d, 0x27, 0xdc, 0xe3, 0x1d, 0x90, 0xb0, 0x26, 0x12, 0xdc, 0x53, 0x96, 0xeb, 0x2b, 0x7e, 0xbf, 0x85, 0x96, 0xb0, 0x2d, 0x8d, 0xa8, 0xb3, 0x97, 0x82, 0x2d, 0x85, 0x93, 0x09, 0x99, 0x5d, 0x2b, 0x7e, 0x7e, 0x46, 0x9a, 0xa4, 0x2a, 0x15, 0x6a, 0x45, 0x9c, 0x07, 0x28, 0x06, 0x58, 0x33, 0x9d, 0x21, 0x25, 0x35, 0x46, 0x3d, 0x9f, 0xc0, 0x21, 0xe3, 0x33, 0xc8, 0xa4, 0x74, 0x1e, 
0x49, 0x20, 0x4f, 0xb1, 0x0d, 0x29, 0x2d, 0x1d, 0x5a, 0xbb, 0xe5, 0x3a, 0x8b, 0x1b, 0xd6, 0xc2, 0xee, 0x4c, 0x25, 0x1e, 0x10, 0xc5, 0x56, 0x5a, 0x7f, 0x1e, 0xd4, 0x2c, 0xb4, 0x2f, 0x14, 0xd0, 0xb5, 0x88, 0x4d, 0x26, 0xda, 0xdf, 0xbe, 0x95, 0x18, 0x26, 0xd0, 0xdc, 0xda, 0xa4, 0x11, 0x25, 0x06, 0xd7, 0xd1, 0xaf, 0x2b, 0x1b, 0xd6, 0xcf, 0xcb, 0xad, 0x58, 0x1d, 0x2e, 0xb8, 0x40, 0xab, 0xa4, 0x1d, 0xad, 0x9f, 0x6b, 0xab, 0xd9, 0x1b, 0xec, 0x87, 0x1d, 0xab, 0xec, 0x1d, 0x1f, 0x71, 0xc2, 0xac, 0x24, 0x1d, 0xf3, 0x5f, 0x6b, 0xae, 0x53, 0x1b, 0x5e, 0x4d, 0x70, 0xaf, 0x7e, 0x19, 0x53, 0x3a, 0xf9, 0xb8, 0xfc, 0x13, 0xc5, 0x25, 0x83, 0xc4, 0x60, 0x23, 0x9f, 0x24, 0xb6, 0xc6, 0xf0, 0x28, 0xad, 0x25, 0xc4, 0xc7, 0xfb, 0x2a, 0xc6, 0x26, 0x34, 0xc8, 0x2c, 0x49, 0x82, 0x22, 0xb2, 0x8a, 0xbb, 0x26, 0xa2, 0xde, 0xc5, 0x99, 0x39, 0x27, 0x07, 0xdc, 0x60, 0xa4, 0x4c, 0x26, 0x62, 0xd9, 0x84, 0xaf, 0x52, 0x23, 0xec, 0xd7, 0xe3, 0xbe, 0x5b, 0x1d, 0x11, 0xd5, 0x98, 0xc5, 0xbd, 0x1d, 0xc8, 0xc9, 0x48, 0xc5, 0x23, 0x1e, 0xa0, 0xb3, 0x47, 0xc4, 0xce, 0x1c, 0xd0, 0x9a, 0x17, 0xc3, 0xfb, 0x1c, 0x31, 0x81, 0x48, 0xc2, 0xf0, 0x1b, 0xf8, 0x6d, 0x72, 0xc3, 0x21, 0x19, 0xf2, 0x5a, 0x1d, 0xc4, 0x7b, 0x19, 0x80, 0x47, 0xd8, 0xc6, 0xe1, 0x17, 0xca, 0x2c, 0x9d, 0xc9, 0x75, 0x27, 0xbe, 0x29, 0xae, 0xc9, 0xe7, 0x28, 0xc8, 0x28, 0x54, 0xca, 0x1a, 0x2a, 0x85, 0x28, 0x0b, 0xca, 0x39, 0x2b, 0xa0, 0x27, 0xde, 0x9d, 0x3c, 0x27, 0x31, 0xdb, 0xdd, 0xa3, 0xae, 0x26, 0xd6, 0xda, 0x17, 0xac, 0xb5, 0x25, 0x7f, 0xd8, 0x9c, 0xc0, 0xc1, 0x21, 0x56, 0xda, 0x42, 0xcb, 0xd4, 0x1e, 0x1e, 0xd6, 0xe7, 0xcc, 0x01, 0x20, 0x52, 0xca, 0x94, 0xcc, 0xef, 0x1f, 0x86, 0xb9, 0x23, 0xcc, 0xc9, 0x1d, 0x2a, 0xa1, 0x84, 0xcb, 0xb5, 0x1c, 0xb2, 0x87, 0xe5, 0xca, 0x51, 0x1d, 0x63, 0x74, 0xb5, 0xca, 0x26, 0x1e, 0xb5, 0x62, 0x16, 0xca, 0xa6, 0x1f, 0x2e, 0x52, 0x0d, 0xcc, 0x1e, 0x1c, 0x10, 0x36, 0x60, 0xcb, 0x58, 0x26, 0x12, 0x2a, 0x90, 0xcb, 0x39, 0x28, 0x61, 0x29, 0xbd, 0xcb, 0x24, 0x29, 0xdb, 0x29, 0x3c, 0xcb, 0x14, 0x2a, 0xe2, 0x28, 0xe5, 
0x1e, 0x52, 0xac, 0x3f, 0xb7, 0x07, 0x20, 0x91, 0xab, 0x46, 0xaf, 0x6b, 0x20, 0xb7, 0xaa, 0xf0, 0xad, 0xa4, 0x21, 0xa5, 0xac, 0x75, 0xaa, 0xd3, 0x22, 0x09, 0xaf, 0xc5, 0xa7, 0xa1, 0x23, 0x71, 0xa7, 0x95, 0x9a, 0x43, 0x27, 0x03, 0xa5, 0xca, 0x94, 0x68, 0x27, 0x31, 0xa5, 0x61, 0x91, 0xf2, 0x28, 0x78, 0xa5, 0x39, 0x87, 0x27, 0x29, 0xd0, 0xa4, 0x7e, 0x7e, 0x08, 0x29, 0x68, 0xa3, 0x93, 0x74, 0xc1, 0x25, 0x7f, 0xa3, 0x1d, 0x64, 0x9f, 0x24, 0xae, 0xa2, 0x70, 0x5f, 0xb4, 0x18, 0x77, 0xa4, 0x19, 0x26, 0x2c, 0x1f, 0xa0, 0xb0, 0x47, 0x24, 0x88, 0x24, 0x03, 0xbb, 0xf2, 0x24, 0xfb, 0x26, 0x29, 0xbf, 0x24, 0x24, 0x6a, 0x1c, 0x5e, 0xac, 0x43, 0xc3, 0xea, 0x1d, 0xe5, 0xab, 0x7c, 0xb8, 0x4b, 0x20, 0x50, 0xaa, 0x39, 0xae, 0xa3, 0x20, 0x7c, 0xa9, 0x8e, 0xac, 0x09, 0x21, 0xca, 0xaa, 0x76, 0xa7, 0x7a, 0x24, 0x55, 0xa4, 0x88, 0x9c, 0xa3, 0x23, 0x64, 0xa4, 0xc2, 0x95, 0x34, 0x26, 0xe1, 0xa2, 0x86, 0x90, 0x14, 0x27, 0x75, 0xa2, 0xbc, 0x86, 0x25, 0x29, 0xe7, 0xa1, 0x76, 0x7a, 0xb3, 0x28, 0x69, 0x9b, 0x8a, 0x6d, 0x72, 0x24, 0x83, 0x9f, 0x78, 0x60, 0xc8, 0x15, 0x7a, 0x9c, 0x11, 0x3e, 0x5d, 0x16, 0x2b, 0x98, 0xc1, 0x21, 0x8f, 0x1d, 0x37, 0x9f, 0xa9, 0x1e, 0xaa, 0x22, 0xad, 0xaa, 0x50, 0x1e, 0xb5, 0x25, 0xb1, 0xb2, 0x61, 0x1f, 0x51, 0x16, 0x36, 0xb4, 0xe6, 0xd7, 0x1e, 0x1b, 0xd9, 0xab, 0x14, 0xc3, 0x0e, 0x1c, 0x0b, 0xaa, 0xa0, 0xbb, 0x7e, 0x1d, 0x91, 0xa9, 0x07, 0xaf, 0x35, 0x1f, 0xfd, 0xa6, 0x83, 0xa8, 0x7a, 0x23, 0x75, 0xa3, 0x3c, 0x9d, 0x1f, 0x24, 0x6f, 0x9f, 0x17, 0x95, 0x2e, 0x24, 0xa3, 0x9a, 0x76, 0x89, 0x22, 0x29, 0xf4, 0x8f, 0xcf, 0x7a, 0x56, 0x2b, 0x1d, 0x8c, 0xa0, 0x6e, 0xea, 0x28, 0x9c, 0x88, 0xf1, 0x60, 0xd2, 0x20, 0x76, 0x8f, 0x36, 0x4c, 0xcb, 0x1d, 0x3d, 0x8c, 0x83, 0x30, 0x52, 0x1c, 0xe3, 0x91, 0x1b, 0x20, 0x0d, 0x1d, 0xec, 0x94, 0x8b, 0x17, 0x98, 0x23, 0xb9, 0x9d, 0x25, 0x18, 0x9d, 0x27, 0xa3, 0xa5, 0x01, 0x1a, 0x68, 0x16, 0x4f, 0xac, 0x0b, 0xd8, 0x49, 0x13, 0xe3, 0xb0, 0x36, 0xd5, 0xdf, 0x1a, 0xde, 0xa8, 0xd9, 0xc1, 0x6a, 0x1a, 0xb1, 0xa7, 0x67, 0xb9, 0x26, 0x1b, 0x99, 0xa3, 
0xe9, 0xab, 0xfa, 0x21, 0xa7, 0x9c, 0xdd, 0x9c, 0xda, 0x25, 0xaf, 0x93, 0x4f, 0x8b, 0x29, 0x29, 0xd2, 0x90, 0x10, 0x82, 0x34, 0x2d, 0x59, 0x8d, 0xdb, 0x77, 0x15, 0x2d, 0xba, 0x8a, 0xe9, 0x68, 0x62, 0x2c, 0xd6, 0x87, 0xbd, 0x57, 0xa3, 0x26, 0xe7, 0x85, 0xd2, 0x42, 0x23, 0x21, 0x31, 0x83, 0xe0, 0x28, 0x12, 0x24, 0xfe, 0x87, 0xc6, 0x1b, 0x2f, 0x25, 0x34, 0x8a, 0xc0, 0x0f, 0xfd, 0x42, 0x07, 0x95, 0xc8, 0x10, 0x67, 0x55, 0xa1, 0xa0, 0x6d, 0x12, 0x72, 0x18, 0x04, 0xa7, 0x3d, 0xd8, 0xb6, 0x15, 0x87, 0xa9, 0x3c, 0xd6, 0x55, 0x10, 0x54, 0xa9, 0xd8, 0xd2, 0x53, 0x14, 0x49, 0xa3, 0xd4, 0xbe, 0xbf, 0x1d, 0x4a, 0x97, 0x1c, 0xa8, 0xda, 0x24, 0xeb, 0x93, 0x42, 0x99, 0x3f, 0x29, 0xa3, 0x8f, 0x63, 0x8c, 0x57, 0x30, 0x41, 0x8a, 0x07, 0x7e, 0xc8, 0x33, 0x45, 0x85, 0xbb, 0x71, 0x21, 0x33, 0x97, 0x82, 0xe3, 0x61, 0x8b, 0x32, 0xf3, 0x80, 0x4b, 0x4e, 0xd5, 0x30, 0x87, 0x7e, 0x14, 0x38, 0xce, 0x34, 0x0f, 0x7e, 0x3d, 0x24, 0xbf, 0x38, 0x97, 0x81, 0x7c, 0x17, 0x40, 0x46, 0xec, 0x8d, 0x2f, 0x13, 0xbf, 0x58, 0xdb, 0x94, 0x52, 0x14, 0x74, 0x6c, 0x03, 0xa1, 0x59, 0x15, 0x85, 0x17, 0xb3, 0x9d, 0x68, 0xda, 0x3e, 0x15, 0x5e, 0xa0, 0x41, 0xd7, 0x90, 0x13, 0xdc, 0xa0, 0xd2, 0xd1, 0xd2, 0x0f, 0xa2, 0x9c, 0x78, 0xc4, 0x5c, 0x1f, 0x86, 0x92, 0x3d, 0xab, 0xe2, 0x29, 0x42, 0x8c, 0x20, 0x99, 0x48, 0x31, 0xca, 0x86, 0x8a, 0x87, 0xfc, 0x36, 0x6b, 0x81, 0x6c, 0x79, 0xd3, 0x39, 0x3a, 0x7c, 0xb3, 0x6a, 0xdf, 0x3b, 0xb3, 0x79, 0xd5, 0x5a, 0x0c, 0x3d, 0x09, 0x77, 0xbf, 0x46, 0x17, 0x41, 0x08, 0x78, 0x26, 0x33, 0x91, 0x46, 0x8d, 0x79, 0xbc, 0x22, 0xc7, 0x4e, 0x63, 0x7e, 0x7b, 0x18, 0x41, 0x5e, 0x28, 0x86, 0xb6, 0x16, 0x16, 0x6f, 0x32, 0x96, 0x66, 0x1a, 0xda, 0x77, 0xdb, 0x9d, 0x50, 0x1a, 0xed, 0x18, 0x1d, 0x93, 0x8a, 0xdd, 0x57, 0x16, 0x16, 0x96, 0xc0, 0xdb, 0x24, 0x13, 0x31, 0x97, 0x10, 0xd4, 0x7b, 0x12, 0x1e, 0x91, 0xf7, 0xc3, 0x93, 0x25, 0x85, 0x89, 0x07, 0xab, 0xfa, 0x32, 0x4e, 0x81, 0xe4, 0x97, 0x4e, 0x39, 0xe7, 0x7c, 0xa0, 0x85, 0x86, 0x40, 0xcc, 0x77, 0xec, 0x75, 0x98, 0x45, 0x9a, 0x74, 0x5d, 0x66, 0x0a, 
0x49, 0x5e, 0x72, 0x01, 0x55, 0xe0, 0x4c, 0xd8, 0x71, 0x61, 0x43, 0xa2, 0x52, 0xc5, 0x73, 0xfe, 0x33, 0xa2, 0x57, 0xc8, 0x76, 0x9a, 0x24, 0x4c, 0x5e, 0xaf, 0x7b, 0xb2, 0x19, 0x39, 0x6c, 0x80, 0x88, 0x14, 0x18, 0xbc, 0x7a, 0x6a, 0x95, 0x7b, 0x1a, 0x7c, 0x82, 0xa1, 0x99, 0xb6, 0x1b, 0x87, 0x19, 0x5d, 0x87, 0x9e, 0xe1, 0x8f, 0x17, 0xf3, 0x8b, 0x4f, 0xdf, 0x27, 0x14, 0x3b, 0x8d, 0x43, 0xda, 0x59, 0x1e, 0x2a, 0x86, 0xd5, 0xc5, 0x76, 0x30, 0x37, 0x7e, 0x4c, 0xab, 0x4d, 0x3c, 0x6e, 0x77, 0x8e, 0x97, 0x22, 0x46, 0xac, 0x72, 0x25, 0x85, 0x86, 0x4e, 0x2e, 0x6e, 0xa5, 0x74, 0x89, 0x53, 0xc2, 0x6c, 0x39, 0x64, 0x6b, 0x58, 0xd8, 0x6b, 0xba, 0x53, 0xd0, 0x5d, 0x0e, 0x6c, 0xe1, 0x43, 0xfb, 0x61, 0xae, 0x70, 0x4d, 0x34, 0xcd, 0x66, 0x6a, 0x73, 0xad, 0x26, 0x58, 0x6a, 0xe5, 0x78, 0x2f, 0x19, 0x79, 0x78, 0x34, 0x83, 0x46, 0x19, 0xc1, 0x85, 0xca, 0x91, 0xd3, 0x1c, 0x07, 0x91, 0x24, 0x9a, 0x9c, 0x1d, 0x4c, 0x1c, 0xac, 0x76, 0xeb, 0xe3, 0x1f, 0x1a, 0xc8, 0x7b, 0x39, 0xe4, 0x16, 0x15, 0xb7, 0x82, 0x21, 0xe7, 0x7b, 0x2b, 0x65, 0x7a, 0xc7, 0xc7, 0xe9, 0x3d, 0x52, 0x72, 0xa2, 0xab, 0x4f, 0x4b, 0x36, 0x6c, 0x41, 0x96, 0x61, 0x55, 0x75, 0x68, 0x79, 0x85, 0x05, 0x5c, 0xff, 0x66, 0x13, 0x74, 0x70, 0x63, 0x6c, 0x64, 0x67, 0x64, 0x98, 0x67, 0x71, 0x65, 0x42, 0x54, 0x49, 0x6a, 0xf8, 0x68, 0x11, 0x45, 0x62, 0x6e, 0xf0, 0x6b, 0xc1, 0x36, 0x6f, 0x72, 0xd0, 0x6f, 0x2a, 0x28, 0x50, 0x75, 0xf8, 0x73, 0x7d, 0x1a, 0x52, 0x82, 0xe8, 0x80, 0x03, 0x1b, 0xaa, 0x8e, 0xe5, 0x8a, 0xdd, 0x1c, 0xfb, 0x9c, 0xff, 0x98, 0xbf, 0x1f, 0x8a, 0x23, 0xe9, 0x65, 0xe5, 0xe1, 0x73, 0x24, 0x62, 0x6a, 0x7d, 0xe3, 0x8d, 0x26, 0x86, 0x73, 0x11, 0xea, 0xac, 0x3c, 0x3a, 0x6c, 0x5b, 0xca, 0x31, 0x4e, 0x80, 0x66, 0x34, 0xab, 0xca, 0x5b, 0xd8, 0x61, 0xdf, 0x96, 0xb1, 0x65, 0x8e, 0x5f, 0x71, 0x85, 0x68, 0x6b, 0xc9, 0x5e, 0x28, 0x75, 0x0d, 0x70, 0xa7, 0x5d, 0x44, 0x64, 0xc2, 0x74, 0xb1, 0x5d, 0x22, 0x54, 0xac, 0x78, 0x7d, 0x60, 0x3a, 0x45, 0xe2, 0x7b, 0xe4, 0x64, 0x1d, 0x37, 0x7f, 0x7f, 0x30, 0x68, 0x4d, 0x29, 0xf6, 0x81, 0x9a, 0x6d, 
0x18, 0x1d, 0x06, 0x8f, 0x2c, 0x79, 0x8d, 0x1e, 0x3e, 0x99, 0xb4, 0x82, 0xb0, 0x1e, 0x60, 0xa7, 0xcc, 0x91, 0x5f, 0x20, 0x32, 0x2b, 0xc8, 0x30, 0x3d, 0xd1, 0x4d, 0x2e, 0x91, 0x54, 0x36, 0xe2, 0x18, 0x3a, 0x40, 0x5f, 0xdb, 0xef, 0x9f, 0x4f, 0xf5, 0x5d, 0x40, 0xcd, 0xf9, 0x61, 0xbd, 0x5a, 0x29, 0xac, 0x3b, 0x6c, 0xa8, 0x58, 0x56, 0x97, 0xc4, 0x74, 0x56, 0x57, 0x0c, 0x87, 0x29, 0x78, 0xc0, 0x56, 0x59, 0x78, 0x3b, 0x7d, 0x5b, 0x54, 0xc7, 0x66, 0x85, 0x81, 0x15, 0x53, 0xa7, 0x55, 0x98, 0x85, 0x1c, 0x56, 0x3f, 0x46, 0x2e, 0x88, 0x05, 0x59, 0xb3, 0x37, 0xe8, 0x8a, 0x7a, 0x5d, 0x68, 0x2a, 0xb3, 0x8c, 0x87, 0x64, 0xd3, 0x1f, 0x37, 0x98, 0xd0, 0x70, 0x07, 0x1f, 0x5f, 0xa4, 0xb0, 0x79, 0x35, 0x1e, 0xfa, 0xad, 0xf7, 0x83, 0x4f, 0x1f, 0xfb, 0x2b, 0xc3, 0x30, 0x26, 0xd1, 0x75, 0x2c, 0xfe, 0x2e, 0x97, 0xd2, 0x7b, 0x57, 0x0d, 0x4b, 0x13, 0xf7, 0xb0, 0x67, 0xe1, 0x4d, 0x9e, 0xd3, 0xb5, 0x76, 0x03, 0x4e, 0x8b, 0xb0, 0x32, 0x7d, 0x14, 0x4f, 0x34, 0x9b, 0xbf, 0x81, 0x39, 0x4e, 0x54, 0x8b, 0x16, 0x85, 0x57, 0x4d, 0x2a, 0x7a, 0xaf, 0x89, 0xe2, 0x4a, 0x80, 0x68, 0x61, 0x8d, 0x38, 0x49, 0x49, 0x57, 0xa6, 0x90, 0x77, 0x4a, 0x0e, 0x47, 0xb4, 0x93, 0x62, 0x4c, 0xc9, 0x38, 0x0c, 0x95, 0x9c, 0x50, 0x83, 0x2b, 0x1f, 0x97, 0xb8, 0x55, 0xa3, 0x1e, 0x9b, 0xa2, 0xca, 0x63, 0x1b, 0x1f, 0x9d, 0xad, 0x3f, 0x6f, 0x1d, 0x1d, 0x45, 0xb2, 0xb3, 0x77, 0x4f, 0x1d, 0xff, 0x2c, 0xef, 0x2e, 0x69, 0xd2, 0x5f, 0x2d, 0x30, 0x2e, 0x74, 0xd2, 0x8a, 0x71, 0x63, 0x3a, 0x03, 0xff, 0xff, 0x82, 0xdf, 0x3d, 0xf9, 0xdb, 0xfe, 0x8a, 0xf2, 0x42, 0x90, 0xbc, 0x4d, 0x8d, 0x7c, 0x44, 0xa8, 0xa3, 0xc3, 0x8f, 0x26, 0x44, 0xe2, 0x91, 0xc8, 0x93, 0x59, 0x42, 0xcd, 0x7e, 0xb7, 0x96, 0xfc, 0x3f, 0x57, 0x6c, 0x13, 0x99, 0x96, 0x3d, 0xab, 0x5b, 0x35, 0x9b, 0xe9, 0x3c, 0x38, 0x4a, 0x8c, 0x9e, 0x92, 0x3b, 0x3a, 0x39, 0x12, 0xa1, 0x9b, 0x3d, 0x89, 0x29, 0x69, 0xa3, 0xf8, 0x42, 0x10, 0x1a, 0x81, 0xad, 0xb7, 0x53, 0x55, 0x1c, 0xbf, 0xb4, 0x1e, 0x5e, 0x85, 0x19, 0xe0, 0xbc, 0x9a, 0x6d, 0xe2, 0x1a, 0x35, 0x2c, 0xa9, 0x2f, 0x2d, 0xd0, 0xba, 
0x76, 0xd1, 0x2c, 0x60, 0xea, 0xbe, 0x88, 0xe7, 0x2c, 0x93, 0xe7, 0xdc, 0x9f, 0x1f, 0x30, 0xf4, 0xe7, 0x16, 0xa2, 0x50, 0x34, 0x23, 0xcc, 0x66, 0xa0, 0xb7, 0x37, 0xac, 0xb1, 0x90, 0xa0, 0x34, 0x38, 0x0d, 0x9b, 0xf3, 0xa3, 0x1c, 0x35, 0x9a, 0x85, 0x73, 0xa5, 0x50, 0x33, 0x78, 0x71, 0xc6, 0xa6, 0xf5, 0x31, 0xae, 0x5f, 0xee, 0xa8, 0x8b, 0x2f, 0xd0, 0x4e, 0x8b, 0xab, 0xf1, 0x2a, 0xb6, 0x3b, 0x70, 0xaf, 0x61, 0x27, 0x37, 0x27, 0x90, 0xb9, 0x77, 0x2d, 0x2d, 0x1d, 0x24, 0xc2, 0x37, 0x40, 0xa9, 0x1e, 0x32, 0xc5, 0x9c, 0x52, 0x36, 0x1f, 0x92, 0xc6, 0xb6, 0x61, 0xb5, 0x1f, 0x60, 0x7f, 0x38, 0x29, 0x91, 0xe5, 0x5d, 0x8c, 0xad, 0x29, 0x43, 0xe2, 0x2b, 0x9b, 0x4b, 0x29, 0xd8, 0xdf, 0xf8, 0xab, 0x06, 0x29, 0x6f, 0xdd, 0x32, 0xba, 0xb1, 0x24, 0xeb, 0xdc, 0x71, 0xb8, 0xee, 0x26, 0xc9, 0xc3, 0x80, 0xb7, 0x15, 0x26, 0xe1, 0xa9, 0xe9, 0xb6, 0xdc, 0x25, 0x54, 0x90, 0x4d, 0xb6, 0xcc, 0x24, 0x25, 0x7a, 0x34, 0xb6, 0x5b, 0x23, 0x1f, 0x66, 0x11, 0xb6, 0xf1, 0x1f, 0xf1, 0x53, 0x40, 0xb9, 0xa8, 0x19, 0x75, 0x3e, 0x01, 0xbd, 0x7c, 0x12, 0xb9, 0x28, 0xf4, 0xc7, 0x6e, 0x26, 0x71, 0x26, 0xd2, 0xc8, 0xb5, 0x2a, 0x53, 0x26, 0xfc, 0xc9, 0x4a, 0x3f, 0x7b, 0x23, 0xc8, 0xc9, 0xbe, 0x52, 0x16, 0x22, 0xe8, 0x8d, 0x88, 0x28, 0x45, 0xe0, 0x7a, 0x9e, 0x2b, 0x28, 0xd7, 0xde, 0x07, 0xa9, 0x1e, 0x28, 0x81, 0xdc, 0x16, 0xb5, 0x30, 0x27, 0x14, 0xdb, 0xae, 0xc5, 0x2c, 0x21, 0xe7, 0xdb, 0xf2, 0xcd, 0xde, 0x1e, 0x6d, 0xd0, 0x83, 0xca, 0x96, 0x1f, 0x38, 0xb7, 0x72, 0xca, 0x9e, 0x1d, 0x06, 0x9d, 0xb5, 0xc9, 0x34, 0x1d, 0x50, 0x84, 0x3d, 0xc8, 0x01, 0x1e, 0x2d, 0x70, 0x53, 0xc8, 0xf2, 0x1e, 0x58, 0x5d, 0x0d, 0xc9, 0x88, 0x1c, 0xd5, 0x4b, 0x0e, 0xcb, 0x13, 0x1b, 0x74, 0x2f, 0xcf, 0xcb, 0x36, 0x27, 0x3a, 0x2a, 0x11, 0xcb, 0x16, 0x29, 0xe7, 0x29, 0x2b, 0xcb, 0x02, 0x2b, 0x63, 0x28, 0xb1, 0xca, 0xf5, 0x2c, 0x53, 0x28, 0x65, 0xa0, 0x31, 0x28, 0x72, 0xdd, 0x1c, 0xa7, 0xd0, 0x28, 0x38, 0xdb, 0xc0, 0xae, 0xd8, 0x27, 0x3e, 0xda, 0xb8, 0xc3, 0xb6, 0x23, 0xe6, 0xdd, 0x1f, 0xd3, 0x70, 0x1f, 0x48, 0xdd, 0x5d, 0xd0, 0xfd, 0x21, 
0x03, 0xcf, 0x6e, 0xd1, 0xbb, 0x20, 0x11, 0xbc, 0xc1, 0xd1, 0xd2, 0x1d, 0x76, 0xa4, 0x9c, 0xd0, 0x10, 0x1e, 0x05, 0x8a, 0x84, 0xcd, 0xd7, 0x20, 0x20, 0x77, 0x1b, 0xcd, 0x26, 0x21, 0x09, 0x64, 0x14, 0xcd, 0x6b, 0x21, 0x4a, 0x53, 0xb1, 0xce, 0xa6, 0x1e, 0x5a, 0x38, 0x71, 0xcc, 0x88, 0x27, 0x31, 0x2b, 0x69, 0xcc, 0x22, 0x29, 0x3f, 0x2a, 0x64, 0xcb, 0xe1, 0x2a, 0x90, 0x29, 0xc3, 0xcb, 0xb4, 0x2b, 0x7a, 0x29, 0x57, 0x1e, 0xe1, 0xad, 0xdb, 0xb9, 0x26, 0x20, 0xf0, 0xad, 0x2c, 0xb1, 0x62, 0x21, 0x1e, 0xad, 0x1d, 0xaf, 0xe1, 0x22, 0x48, 0xaf, 0xea, 0xad, 0x45, 0x22, 0x63, 0xb3, 0x4b, 0xaa, 0xe7, 0x23, 0x2c, 0xab, 0x21, 0x9d, 0xae, 0x27, 0x3b, 0xa8, 0x5e, 0x96, 0x8b, 0x27, 0x99, 0xa7, 0x76, 0x93, 0xdc, 0x28, 0xc0, 0xa7, 0x02, 0x88, 0x79, 0x29, 0xf3, 0xa5, 0xd8, 0x7e, 0xe6, 0x29, 0x80, 0xa5, 0x47, 0x75, 0x2d, 0x25, 0xe6, 0xa5, 0x2e, 0x65, 0xd0, 0x18, 0x93, 0xa5, 0x97, 0x45, 0x55, 0x1e, 0x0c, 0xbc, 0xd5, 0x28, 0xdf, 0x23, 0x85, 0xc3, 0x5c, 0x28, 0x32, 0x26, 0x8a, 0xc6, 0xbf, 0x27, 0xcf, 0x28, 0x26, 0xc7, 0x2e, 0x27, 0x0d, 0x1d, 0x26, 0xae, 0x18, 0xc5, 0xea, 0x1d, 0x81, 0xae, 0x2d, 0xbe, 0x86, 0x20, 0xcf, 0xac, 0xba, 0xb1, 0x3a, 0x21, 0x0a, 0xac, 0x8f, 0xaf, 0x1e, 0x22, 0x7b, 0xaf, 0x2b, 0xab, 0x3c, 0x21, 0xb5, 0xb0, 0xb4, 0xa7, 0x3d, 0x23, 0x7c, 0xa9, 0x6c, 0x99, 0x83, 0x27, 0x46, 0xa6, 0xd1, 0x93, 0x9d, 0x27, 0xe8, 0xa6, 0x2b, 0x88, 0xba, 0x29, 0xda, 0xa4, 0xdb, 0x7e, 0x6e, 0x29, 0x06, 0xa3, 0x14, 0x71, 0x02, 0x25, 0x39, 0xa3, 0x05, 0x62, 0xcd, 0x16, 0x95, 0xa1, 0xcb, 0x40, 0x7a, 0x1a, 0xf0, 0xa6, 0x53, 0x25, 0x0d, 0x22, 0x0b, 0xb1, 0x3e, 0x23, 0x01, 0x25, 0xe8, 0xbb, 0x2f, 0x23, 0x4f, 0x27, 0xfa, 0xbd, 0xea, 0x22, 0xc0, 0x17, 0xbe, 0xb7, 0xad, 0xd9, 0x4b, 0x1c, 0xe0, 0xad, 0x7c, 0xc5, 0xaf, 0x1d, 0x28, 0xad, 0x73, 0xbe, 0xd4, 0x1e, 0xb1, 0xac, 0x62, 0xb3, 0x17, 0x20, 0xdf, 0xab, 0x53, 0xad, 0x70, 0x22, 0x66, 0xab, 0xa7, 0xa6, 0x4c, 0x23, 0xec, 0xa5, 0xe3, 0x9b, 0xc3, 0x23, 0xed, 0xa1, 0xc2, 0x90, 0x14, 0x29, 0x97, 0x9c, 0xd1, 0x84, 0x69, 0x2d, 0x55, 0x98, 0x71, 0x76, 0xcf, 
0x2b, 0x5c, 0x95, 0xc2, 0x66, 0xcf, 0x1f, 0xc2, 0x96, 0x78, 0x4f, 0xd8, 0x16, 0xb0, 0x99, 0x28, 0x36, 0x67, 0x1a, 0x2c, 0x99, 0xcd, 0x1e, 0x2b, 0x22, 0x2a, 0xa1, 0x77, 0x1b, 0xf2, 0x26, 0xb8, 0xab, 0x1a, 0x1c, 0x68, 0x29, 0xd2, 0xb1, 0xd1, 0x1e, 0x04, 0x17, 0xe9, 0xae, 0x66, 0xda, 0xfa, 0x16, 0x2b, 0xb4, 0x3f, 0xd8, 0xde, 0x1c, 0x5c, 0xac, 0x52, 0xc5, 0x3f, 0x1c, 0x77, 0xab, 0xe2, 0xbe, 0x40, 0x1d, 0xbd, 0xaa, 0x2e, 0xb2, 0xde, 0x25, 0x4a, 0xa3, 0x09, 0xa3, 0x6f, 0x28, 0xfb, 0x9e, 0x16, 0x96, 0xb4, 0x2d, 0x76, 0x9a, 0xf7, 0x8b, 0xe9, 0x31, 0x14, 0x96, 0x33, 0x7e, 0xe6, 0x32, 0x70, 0x93, 0x16, 0x70, 0xd0, 0x31, 0x1d, 0x8f, 0xfd, 0x60, 0x40, 0x2b, 0xb5, 0x8f, 0x72, 0x48, 0x20, 0x25, 0x3b, 0x8d, 0x8d, 0x2d, 0xd5, 0x25, 0xd7, 0x8c, 0x9c, 0x15, 0xee, 0x27, 0x4c, 0x94, 0xb8, 0x11, 0x51, 0x41, 0xce, 0x9d, 0x77, 0x0f, 0x40, 0x62, 0xd0, 0xaa, 0x32, 0x11, 0x45, 0x19, 0x92, 0xab, 0x0d, 0xdb, 0xc8, 0x17, 0xc1, 0xac, 0x7c, 0xda, 0x0d, 0x13, 0xaa, 0xaf, 0x57, 0xd7, 0x51, 0x17, 0x68, 0xaa, 0x69, 0xc6, 0x0b, 0x1d, 0x1d, 0xa3, 0xa4, 0xb7, 0x00, 0x28, 0xa8, 0x9d, 0xd8, 0xa4, 0x53, 0x2e, 0xbb, 0x97, 0xdf, 0x95, 0x5c, 0x33, 0xc2, 0x92, 0xaa, 0x86, 0xa5, 0x37, 0x2b, 0x8e, 0x24, 0x78, 0xdc, 0x38, 0x1a, 0x8a, 0xb1, 0x69, 0xb3, 0x37, 0xaa, 0x87, 0xff, 0x56, 0xd5, 0x36, 0xe1, 0x87, 0x04, 0x40, 0x1c, 0x39, 0x93, 0x87, 0xb7, 0x2a, 0xba, 0x3c, 0xe3, 0x86, 0xf3, 0x16, 0x6a, 0x4a, 0x35, 0x91, 0x8b, 0x13, 0x7a, 0x5d, 0x25, 0x9c, 0x5d, 0x14, 0x3b, 0x6b, 0xf5, 0xa7, 0xaf, 0x14, 0x53, 0x19, 0xe9, 0xa0, 0xfd, 0xdd, 0x22, 0x1a, 0xc9, 0xa3, 0x6a, 0xda, 0x68, 0x17, 0x6a, 0xa6, 0xc5, 0xd7, 0xe0, 0x0f, 0x2d, 0xa6, 0x8c, 0xcf, 0xcb, 0x21, 0xac, 0x9d, 0x17, 0xb8, 0x38, 0x2f, 0x83, 0x94, 0x72, 0xa2, 0xbb, 0x36, 0xae, 0x8e, 0x8b, 0x91, 0x1b, 0x3c, 0x38, 0x8a, 0x13, 0x82, 0x82, 0x40, 0xcf, 0x85, 0x49, 0x73, 0x7d, 0x42, 0xa5, 0x82, 0x89, 0x62, 0x4f, 0x44, 0x52, 0x80, 0x4b, 0x4e, 0x56, 0x48, 0x42, 0x80, 0xb2, 0x3a, 0x15, 0x4e, 0x5e, 0x83, 0x05, 0x2b, 0x3e, 0x55, 0x1e, 0x83, 0x55, 0x19, 0x8a, 0x63, 0xfe, 0x8d, 
0xe5, 0x16, 0x82, 0x70, 0x6e, 0x98, 0x34, 0x17, 0x6a, 0x7c, 0x65, 0xa1, 0x9a, 0x1b, 0xb1, 0x1a, 0xc5, 0x97, 0x83, 0xe0, 0x74, 0x1b, 0x38, 0x99, 0xb0, 0xdd, 0xdb, 0x18, 0x89, 0x9d, 0x1d, 0xdb, 0x36, 0x1b, 0x04, 0x9b, 0x57, 0xcf, 0x31, 0x2b, 0xa9, 0x92, 0x54, 0xb7, 0xe9, 0x38, 0x89, 0x8a, 0x76, 0xa2, 0x06, 0x42, 0x12, 0x85, 0x0a, 0x8f, 0x6c, 0x48, 0xd9, 0x80, 0xf4, 0x7e, 0xa2, 0x4e, 0x14, 0x7d, 0x93, 0x6e, 0xfd, 0x52, 0x42, 0x7b, 0x55, 0x5e, 0x47, 0x55, 0xdc, 0x7a, 0x6f, 0x4b, 0xee, 0x5a, 0x62, 0x7c, 0x76, 0x3a, 0xbc, 0x5f, 0xca, 0x7f, 0x84, 0x2c, 0xe5, 0x66, 0xcf, 0x82, 0x3c, 0x1f, 0x22, 0x6f, 0xf6, 0x8c, 0x3d, 0x19, 0x42, 0x7c, 0xd3, 0x97, 0xa0, 0x1a, 0xc5, 0x8d, 0x80, 0xa1, 0xeb, 0x1d, 0x69, 0x1d, 0xee, 0x8b, 0x1c, 0xe2, 0xc7, 0x1d, 0x6e, 0x8f, 0xd3, 0xe2, 0x39, 0x1c, 0xb2, 0x93, 0x60, 0xe1, 0x42, 0x27, 0x20, 0x8f, 0x74, 0xd0, 0x3d, 0x37, 0xfb, 0x86, 0xdc, 0xb6, 0x5e, 0x44, 0xf8, 0x80, 0x1d, 0xa1, 0xac, 0x4f, 0x58, 0x7a, 0xfc, 0x8f, 0x3f, 0x56, 0xc8, 0x77, 0xdb, 0x7d, 0xd6, 0x5c, 0xe6, 0x75, 0x7f, 0x6d, 0xb4, 0x61, 0xbc, 0x74, 0xca, 0x5c, 0xc0, 0x66, 0x00, 0x75, 0xc8, 0x4c, 0x6a, 0x6a, 0x2f, 0x78, 0xa7, 0x3c, 0xcd, 0x6e, 0x67, 0x7b, 0xde, 0x2e, 0x64, 0x73, 0x88, 0x7f, 0xff, 0x20, 0x36, 0x7c, 0x19, 0x87, 0xea, 0x19, 0xf4, 0x89, 0x03, 0x95, 0xb5, 0x1c, 0x7f, 0x97, 0x78, 0xa2, 0x5c, 0x1e, 0x41, 0x20, 0x54, 0x7b, 0x2f, 0xe5, 0xd1, 0x22, 0x09, 0x7f, 0x0a, 0xe5, 0x9b, 0x26, 0x1f, 0x84, 0xe2, 0xe5, 0x75, 0x36, 0x87, 0x82, 0xee, 0xd1, 0x8c, 0x47, 0x70, 0x7b, 0x09, 0xb6, 0x60, 0x54, 0xa1, 0x74, 0xd5, 0xa1, 0x48, 0x5e, 0x5b, 0x71, 0x3f, 0x8f, 0x02, 0x66, 0x48, 0x6f, 0x53, 0x7e, 0x57, 0x6c, 0xf9, 0x6d, 0xf4, 0x6e, 0x28, 0x70, 0x5b, 0x6e, 0x13, 0x5d, 0x0d, 0x74, 0x16, 0x70, 0x61, 0x4d, 0x72, 0x77, 0xad, 0x73, 0xb6, 0x3e, 0x8f, 0x7b, 0x6a, 0x77, 0xb1, 0x30, 0x57, 0x7e, 0xe2, 0x7b, 0x7b, 0x21, 0xfc, 0x86, 0x4c, 0x82, 0xb6, 0x1b, 0x2d, 0x94, 0x53, 0x90, 0x60, 0x1e, 0x23, 0xa0, 0x5c, 0x9b, 0x11, 0x20, 0x28, 0x26, 0xa0, 0x6b, 0x2a, 0xe5, 0x16, 0x29, 0x6b, 0x70, 0x63, 0xe8, 0x6a, 
0x33, 0x72, 0x74, 0x3f, 0xe6, 0xdb, 0x49, 0x1b, 0x74, 0x78, 0xd2, 0xfb, 0x59, 0x21, 0x6e, 0xb2, 0xb6, 0xfc, 0x65, 0xd0, 0x6a, 0x86, 0xa1, 0x22, 0x6f, 0x4d, 0x68, 0xa9, 0x8f, 0xc8, 0x75, 0x5d, 0x67, 0x7a, 0x7f, 0x30, 0x7a, 0x28, 0x66, 0xd7, 0x6e, 0x34, 0x7e, 0x2b, 0x66, 0x70, 0x5d, 0xcb, 0x81, 0xad, 0x68, 0x3f, 0x4e, 0x33, 0x84, 0xf4, 0x6b, 0xea, 0x3f, 0x5f, 0x88, 0x29, 0x70, 0x20, 0x31, 0x99, 0x8a, 0xd5, 0x74, 0x91, 0x24, 0x09, 0x92, 0x6d, 0x7d, 0x1e, 0x1e, 0x49, 0x9e, 0x8e, 0x87, 0x3f, 0x1f, 0x0b, 0xab, 0xf3, 0x94, 0x15, 0x21, 0x1c, 0x2b, 0xd5, 0x30, 0x47, 0xd1, 0x59, 0x33, 0x21, 0x5a, 0x64, 0xe6, 0x71, 0x47, 0x68, 0x65, 0x71, 0xed, 0x39, 0x5d, 0xbe, 0x65, 0xdc, 0xd5, 0xcf, 0x6c, 0xc0, 0x63, 0x16, 0xb8, 0x37, 0x76, 0xf5, 0x61, 0x9c, 0xa2, 0x52, 0x7e, 0x92, 0x60, 0x74, 0x91, 0x72, 0x82, 0xc5, 0x5f, 0x9b, 0x82, 0x08, 0x87, 0x41, 0x5e, 0x2a, 0x6f, 0xaa, 0x8b, 0x4f, 0x5d, 0x64, 0x5e, 0xb7, 0x8e, 0xf5, 0x5e, 0xfb, 0x4e, 0xaa, 0x91, 0xa7, 0x62, 0x48, 0x3f, 0xfa, 0x94, 0x3b, 0x66, 0xd5, 0x32, 0x74, 0x96, 0x50, 0x6b, 0x3c, 0x25, 0xba, 0x9d, 0x28, 0x74, 0x02, 0x1f, 0x29, 0xa9, 0xbc, 0x7d, 0xc8, 0x1f, 0xbc, 0xb2, 0xbb, 0x89, 0x11, 0x1f, 0xc7, 0x2b, 0xd0, 0x30, 0x30, 0xd1, 0x80, 0x2d, 0x18, 0x2e, 0xaa, 0xd2, 0x92, 0x63, 0x62, 0x53, 0xba, 0xf1, 0xeb, 0x75, 0xa8, 0x57, 0xbd, 0xda, 0xe2, 0x81, 0x22, 0x58, 0x26, 0xbd, 0x58, 0x87, 0x93, 0x58, 0xbc, 0xa7, 0x31, 0x8b, 0xc5, 0x58, 0x0f, 0x95, 0xb2, 0x90, 0x20, 0x56, 0xe8, 0x84, 0x48, 0x94, 0x6d, 0x54, 0x43, 0x71, 0x94, 0x97, 0xbe, 0x53, 0x0e, 0x60, 0x39, 0x9b, 0x1a, 0x53, 0xde, 0x50, 0x6a, 0x9d, 0xf4, 0x56, 0x9e, 0x40, 0x87, 0x9f, 0xcb, 0x5a, 0x7f, 0x33, 0xa2, 0xa1, 0x77, 0x5f, 0x66, 0x26, 0x34, 0xa8, 0x43, 0x67, 0xe3, 0x1e, 0xd4, 0xaf, 0xb5, 0x72, 0x71, 0x1d, 0x80, 0xb7, 0xd3, 0x7c, 0x17, 0x1e, 0x1e, 0x2c, 0xfc, 0x2e, 0x73, 0xd2, 0x6a, 0x2d, 0x4a, 0x2e, 0x87, 0xd2, 0xa2, 0x7d, 0x88, 0x40, 0xea, 0xf9, 0x73, 0x92, 0x22, 0x49, 0x0a, 0xe5, 0x5b, 0x98, 0x73, 0x4c, 0x9d, 0xc8, 0xbd, 0x98, 0x2c, 0x4e, 0xea, 0xaf, 0x48, 0x99, 0xb5, 0x4f, 
0x23, 0x9c, 0xb1, 0x9e, 0x0d, 0x4c, 0xae, 0x88, 0x25, 0xa1, 0xfa, 0x49, 0x58, 0x74, 0x58, 0xa4, 0x8c, 0x47, 0x97, 0x63, 0x4f, 0xa7, 0x13, 0x46, 0xe3, 0x52, 0x84, 0xa9, 0xd7, 0x46, 0xbb, 0x41, 0x7e, 0xac, 0x1c, 0x4a, 0x59, 0x31, 0x5e, 0xae, 0x95, 0x4d, 0x00, 0x22, 0x22, 0xb3, 0xc3, 0x54, 0x4b, 0x19, 0xe0, 0xbc, 0xf1, 0x63, 0x8d, 0x17, 0xa1, 0xc3, 0x84, 0x70, 0xad, 0x1a, 0xe2, 0x2c, 0xb4, 0x2f, 0x37, 0xd0, 0xc6, 0x7e, 0x2e, 0x2f, 0xb4, 0xed, 0xd9, 0x8a, 0x9f, 0x34, 0x4f, 0xea, 0x83, 0xab, 0xb4, 0x3b, 0xa9, 0xf1, 0x72, 0xae, 0x24, 0x3e, 0xca, 0xd7, 0x1a, 0xab, 0x78, 0x42, 0xea, 0xbb, 0xf1, 0xaa, 0x80, 0x43, 0x17, 0xa6, 0x4f, 0xae, 0x1d, 0x3e, 0xfe, 0x8e, 0x90, 0xb0, 0x4b, 0x3b, 0x8b, 0x79, 0x6f, 0xb2, 0x76, 0x38, 0xe4, 0x67, 0x69, 0xb4, 0x78, 0x36, 0xd0, 0x56, 0x78, 0xb7, 0x5f, 0x33, 0xcc, 0x43, 0xe2, 0xbb, 0x18, 0x31, 0x3c, 0x2e, 0xdc, 0xbd, 0xf4, 0x33, 0xc6, 0x1c, 0xb8, 0xc5, 0xdb, 0x47, 0x12, 0x20, 0x8d, 0xc7, 0xba, 0x57, 0x9f, 0x21, 0x0e, 0xc8, 0x63, 0x66, 0x37, 0x20, 0x3a, 0x82, 0x07, 0x2b, 0x91, 0xe7, 0x88, 0x8f, 0xd9, 0x2b, 0xa0, 0xe4, 0xcd, 0xa1, 0x62, 0x2c, 0xd8, 0xe3, 0x02, 0xb2, 0x13, 0x2e, 0x04, 0xe2, 0x74, 0xc5, 0xff, 0x2f, 0xcf, 0xe6, 0xd3, 0xc4, 0x3e, 0x30, 0x3f, 0xce, 0xa5, 0xc2, 0x5f, 0x30, 0x34, 0xb4, 0xd9, 0xc2, 0x4a, 0x2e, 0x2c, 0x9a, 0x8d, 0xc2, 0x44, 0x2c, 0x29, 0x83, 0x36, 0xc2, 0x50, 0x29, 0xe5, 0x6e, 0x2e, 0xc3, 0x3a, 0x26, 0xaf, 0x5a, 0xdf, 0xc5, 0xaf, 0x20, 0xea, 0x45, 0x90, 0xc9, 0x49, 0x1b, 0x5a, 0x30, 0x5a, 0xca, 0xde, 0x2a, 0x13, 0x28, 0xe8, 0xca, 0xd1, 0x2c, 0xbe, 0x28, 0x27, 0xca, 0xaf, 0x45, 0xab, 0x24, 0xbe, 0xcb, 0x6f, 0x57, 0x4b, 0x23, 0xc4, 0x92, 0xa4, 0x29, 0xeb, 0xe1, 0x9d, 0xa2, 0x41, 0x2a, 0x9a, 0xdf, 0xc3, 0xad, 0xf7, 0x2a, 0xad, 0xde, 0xa6, 0xc0, 0x34, 0x2a, 0x26, 0xe0, 0x49, 0xcc, 0xa9, 0x26, 0xf4, 0xe1, 0xee, 0xda, 0x59, 0x1e, 0x21, 0xdf, 0x55, 0xd7, 0xf1, 0x1d, 0xf2, 0xc3, 0x7e, 0xd7, 0x2a, 0x1c, 0x9b, 0xa7, 0x91, 0xd1, 0x97, 0x1f, 0xb5, 0x89, 0xb5, 0xce, 0xf2, 0x21, 0x53, 0x74, 0xc8, 0xce, 0x23, 0x21, 0xe3, 0x61, 0x7a, 
0xce, 0x82, 0x20, 0x3a, 0x4e, 0x3b, 0xcf, 0xe9, 0x1f, 0xac, 0x32, 0xcb, 0xcc, 0xe2, 0x28, 0xd0, 0x2b, 0x43, 0xcc, 0x42, 0x2b, 0x06, 0x2a, 0x02, 0xcb, 0xe9, 0x2c, 0x40, 0x29, 0x56, 0xcb, 0xb1, 0x2d, 0x09, 0x28, 0xea, 0xa1, 0xc9, 0x29, 0xaa, 0xde, 0x8b, 0xab, 0xf5, 0x29, 0xa1, 0xdd, 0x6c, 0xb3, 0x88, 0x29, 0x42, 0xdd, 0x9a, 0xc6, 0xa8, 0x26, 0x7c, 0xdf, 0xf7, 0xd9, 0xb6, 0x23, 0x21, 0xe1, 0xd2, 0xd6, 0x89, 0x23, 0xd3, 0xd4, 0xf2, 0xd6, 0x5e, 0x23, 0x74, 0xc0, 0xd2, 0xd6, 0x4d, 0x21, 0x53, 0xa8, 0x69, 0xd3, 0xb9, 0x22, 0x24, 0x8d, 0xa1, 0xd1, 0x1a, 0x23, 0x2c, 0x79, 0x72, 0xd0, 0x1e, 0x23, 0x63, 0x66, 0x11, 0xd0, 0x2c, 0x23, 0x6b, 0x55, 0x59, 0xd1, 0x2c, 0x20, 0xa4, 0x3a, 0x7f, 0xcd, 0xb6, 0x28, 0x50, 0x2c, 0x40, 0xcd, 0x0b, 0x2a, 0x1d, 0x2b, 0x0a, 0xcc, 0x9d, 0x2b, 0x45, 0x2a, 0x4a, 0xcc, 0x53, 0x2c, 0x13, 0x29, 0xc8, 0x1e, 0x58, 0xc1, 0x4b, 0xd5, 0xe6, 0x21, 0x4f, 0xaf, 0x15, 0xb3, 0x58, 0x21, 0x84, 0xaf, 0x4c, 0xb2, 0x1d, 0x22, 0xde, 0xb3, 0x78, 0xaf, 0xc5, 0x22, 0xbc, 0xb6, 0xda, 0xae, 0x2d, 0x23, 0x05, 0xb6, 0xe9, 0xa8, 0x2e, 0x27, 0x70, 0xaa, 0xfa, 0x98, 0xb3, 0x27, 0xda, 0xaa, 0x57, 0x95, 0xb5, 0x2a, 0x54, 0xad, 0x50, 0x8d, 0x05, 0x2a, 0xd1, 0xa7, 0xdc, 0x7f, 0x38, 0x29, 0x94, 0xa7, 0x11, 0x75, 0xa8, 0x26, 0x4b, 0xa7, 0x43, 0x67, 0x02, 0x19, 0xe9, 0xa9, 0xa9, 0x47, 0x0c, 0x21, 0x9b, 0xc7, 0x81, 0x2b, 0x7e, 0x24, 0xe8, 0xc7, 0xfa, 0x29, 0x75, 0x27, 0x14, 0xc8, 0x43, 0x28, 0x3b, 0x28, 0x9c, 0xc8, 0x76, 0x27, 0x6a, 0x1e, 0x38, 0xc0, 0xb0, 0xd7, 0x22, 0x1e, 0x2c, 0xc0, 0x98, 0xd5, 0x24, 0x1f, 0xe2, 0xaf, 0x63, 0xb4, 0x96, 0x21, 0x97, 0xaf, 0x91, 0xb2, 0x33, 0x23, 0x1b, 0xb4, 0x00, 0xaf, 0x0f, 0x22, 0x46, 0xb6, 0x6b, 0xac, 0x6a, 0x24, 0x3a, 0xb5, 0x1b, 0xa3, 0x5c, 0x27, 0xa9, 0xab, 0x2b, 0x97, 0x31, 0x2a, 0x40, 0xac, 0x09, 0x8d, 0xc6, 0x2a, 0xd6, 0xa8, 0x91, 0x7f, 0xff, 0x29, 0x3a, 0xa6, 0xcf, 0x72, 0xb4, 0x25, 0xea, 0xa6, 0x9a, 0x64, 0xd3, 0x18, 0xb3, 0xa8, 0x12, 0x43, 0x1b, 0x1f, 0xc3, 0xbb, 0xcc, 0x28, 0x3f, 0x25, 0x19, 0xc2, 0x20, 0x26, 0x8b, 0x28, 0x74, 0xc6, 
0xae, 0x26, 0xa0, 0x2a, 0x08, 0xc7, 0x33, 0x25, 0xfb, 0x18, 0xff, 0xb9, 0xc9, 0xdb, 0x9c, 0x1b, 0x5a, 0xbf, 0x01, 0xd8, 0xbd, 0x1d, 0xe0, 0xbf, 0x61, 0xd3, 0xd1, 0x1f, 0xdc, 0xaf, 0xc0, 0xb6, 0xf8, 0x21, 0x73, 0xb0, 0x05, 0xb2, 0xd1, 0x23, 0x21, 0xb3, 0xe3, 0xad, 0xc6, 0x25, 0x81, 0xb0, 0x87, 0xa6, 0x44, 0x29, 0x1d, 0xab, 0xe7, 0x99, 0xb0, 0x2a, 0x90, 0xaa, 0xb3, 0x8e, 0xda, 0x2b, 0x34, 0xa7, 0xef, 0x80, 0xde, 0x28, 0xb1, 0xa4, 0xd8, 0x6f, 0xd0, 0x20, 0x95, 0xa1, 0xad, 0x57, 0x64, 0x17, 0xb8, 0xa1, 0xfa, 0x37, 0xf6, 0x1f, 0x07, 0xa8, 0x2a, 0x22, 0x56, 0x26, 0x15, 0xb1, 0xf6, 0x20, 0x67, 0x29, 0x09, 0xba, 0x0c, 0x20, 0xcf, 0x49, 0x77, 0xc4, 0x2b, 0x1c, 0xbb, 0x17, 0x5d, 0xb2, 0xec, 0xdf, 0x13, 0x18, 0x5e, 0xb8, 0x3d, 0xdb, 0xc8, 0x1a, 0x53, 0xbb, 0x89, 0xd7, 0xf7, 0x1e, 0x4b, 0xb0, 0x5e, 0xc3, 0x53, 0x1f, 0xfb, 0xb0, 0x76, 0xb9, 0xb8, 0x21, 0x7b, 0xb2, 0xaf, 0xb3, 0x80, 0x2b, 0xd5, 0xab, 0x0d, 0xa3, 0x2d, 0x31, 0xe9, 0xa4, 0x46, 0x94, 0xad, 0x34, 0x05, 0xa0, 0x75, 0x87, 0xa4, 0x34, 0xf6, 0x9d, 0x41, 0x79, 0x36, 0x34, 0x5e, 0x9a, 0x9a, 0x67, 0x9f, 0x30, 0x37, 0x98, 0xd2, 0x4f, 0xcf, 0x29, 0xf8, 0x99, 0x01, 0x32, 0x78, 0x2b, 0x39, 0x98, 0x75, 0x1a, 0x80, 0x2d, 0x58, 0xa2, 0x3a, 0x13, 0xe5, 0x45, 0xde, 0xa6, 0x82, 0x12, 0x52, 0x66, 0x0d, 0xae, 0x66, 0x13, 0x12, 0x1b, 0x54, 0xae, 0xbd, 0xde, 0xc4, 0x19, 0xfe, 0xaf, 0xbb, 0xdd, 0xc2, 0x16, 0xeb, 0xb4, 0xc8, 0xdc, 0x35, 0x18, 0x91, 0xb6, 0xd6, 0xd5, 0x8f, 0x1b, 0xa2, 0xb2, 0xb5, 0xc6, 0x99, 0x29, 0xd6, 0xa9, 0xd5, 0xb1, 0x09, 0x34, 0x03, 0xa1, 0x68, 0x9d, 0xd2, 0x38, 0x76, 0x9b, 0xcd, 0x8f, 0xa2, 0x3b, 0xa5, 0x97, 0x86, 0x81, 0xb9, 0x3d, 0xf7, 0x94, 0x7e, 0x72, 0x3f, 0x3d, 0x70, 0x91, 0xd1, 0x5e, 0xab, 0x3c, 0xd2, 0x90, 0xbd, 0x47, 0x47, 0x3f, 0xce, 0x90, 0xeb, 0x30, 0x44, 0x43, 0xd1, 0x91, 0xba, 0x1c, 0x3a, 0x4e, 0x27, 0x97, 0xa6, 0x12, 0xf9, 0x65, 0x5f, 0xa5, 0xd6, 0x13, 0xc9, 0x70, 0xaf, 0xac, 0x2a, 0x17, 0x15, 0x1c, 0x1a, 0xa4, 0x9a, 0xe0, 0x0c, 0x1d, 0x3f, 0xa7, 0xf1, 0xde, 0x85, 0x1b, 0x02, 0xac, 0xc7, 0xdd, 0xf7, 
0x14, 0xfd, 0xb0, 0xd3, 0xdb, 0x91, 0x26, 0x97, 0xa7, 0x66, 0xc4, 0x84, 0x35, 0x19, 0x9e, 0x4a, 0xad, 0xbd, 0x3c, 0xae, 0x97, 0x73, 0x9a, 0x0d, 0x43, 0x08, 0x93, 0x05, 0x8b, 0x9c, 0x49, 0x28, 0x8e, 0xb6, 0x7c, 0xaf, 0x4b, 0x28, 0x8c, 0x5f, 0x6b, 0x52, 0x4d, 0x54, 0x8a, 0x5a, 0x57, 0x51, 0x51, 0x06, 0x8a, 0x67, 0x42, 0x26, 0x56, 0x81, 0x8c, 0x1f, 0x32, 0x13, 0x5c, 0xd2, 0x8d, 0x71, 0x21, 0x01, 0x67, 0x5c, 0x92, 0x9b, 0x16, 0x90, 0x74, 0x9d, 0xa0, 0x7d, 0x19, 0xe3, 0x7f, 0xc2, 0xab, 0x56, 0x18, 0x01, 0x1e, 0x8a, 0x9a, 0xb1, 0xe1, 0xf4, 0x1f, 0x96, 0x9d, 0x28, 0xe0, 0xd1, 0x1e, 0x5e, 0xa2, 0xd9, 0xe1, 0x47, 0x22, 0x00, 0xa5, 0x05, 0xda, 0x99, 0x31, 0xc3, 0x9c, 0x1a, 0xc3, 0x7b, 0x40, 0x57, 0x93, 0x9b, 0xac, 0xab, 0x4a, 0x01, 0x8d, 0xc4, 0x99, 0x33, 0x51, 0x3e, 0x8a, 0x30, 0x87, 0xd3, 0x56, 0x97, 0x87, 0x14, 0x78, 0x4d, 0x5a, 0xfa, 0x84, 0xff, 0x67, 0x8b, 0x5e, 0xec, 0x84, 0x2a, 0x55, 0x04, 0x63, 0x46, 0x85, 0xc6, 0x43, 0x7f, 0x69, 0x44, 0x88, 0x84, 0x34, 0x51, 0x6e, 0x6e, 0x8b, 0x51, 0x26, 0x43, 0x74, 0x86, 0x90, 0x59, 0x19, 0xd0, 0x7f, 0x9f, 0x9a, 0x6d, 0x1b, 0x16, 0x92, 0x76, 0xa9, 0xe5, 0x1a, 0xf5, 0x22, 0x51, 0x8e, 0x0d, 0xe2, 0xfd, 0x23, 0x73, 0x92, 0x74, 0xe2, 0xbc, 0x26, 0x5e, 0x96, 0xda, 0xe2, 0x8a, 0x31, 0x4b, 0x97, 0xd3, 0xd9, 0xa6, 0x40, 0xc8, 0x8f, 0xc8, 0xc2, 0x54, 0x4d, 0xd8, 0x88, 0xdf, 0xac, 0x6c, 0x57, 0xd4, 0x84, 0x2c, 0x99, 0x66, 0x5f, 0x84, 0x81, 0x2a, 0x87, 0x60, 0x65, 0xdf, 0x7e, 0xf0, 0x77, 0x15, 0x6a, 0xee, 0x7e, 0x00, 0x65, 0x9b, 0x6f, 0x0c, 0x7e, 0xdd, 0x55, 0x04, 0x73, 0x01, 0x81, 0x65, 0x45, 0x4e, 0x76, 0xda, 0x84, 0xc0, 0x36, 0x2c, 0x7b, 0x77, 0x88, 0x12, 0x27, 0xc6, 0x7f, 0xd6, 0x8b, 0xeb, 0x1a, 0x83, 0x8c, 0x9c, 0x99, 0x90, 0x1c, 0xf7, 0x98, 0xe3, 0xa4, 0x63, 0x1e, 0x9e, 0x24, 0xce, 0x7d, 0x5a, 0xe6, 0x0c, 0x28, 0x00, 0x82, 0x7c, 0xe5, 0xde, 0x2e, 0xbd, 0x88, 0xac, 0xe5, 0xfb, 0x42, 0xc5, 0x8a, 0x8f, 0xd9, 0x84, 0x51, 0xd7, 0x83, 0x57, 0xc1, 0xbc, 0x5e, 0x50, 0x7d, 0x66, 0xac, 0x2b, 0x67, 0x99, 0x7a, 0xa2, 0x99, 0x3f, 0x6f, 0x97, 0x78, 
0xc5, 0x88, 0x0c, 0x76, 0x7e, 0x77, 0x7e, 0x77, 0xb4, 0x79, 0x8f, 0x77, 0x34, 0x66, 0x15, 0x7d, 0x07, 0x78, 0xef, 0x55, 0xf1, 0x80, 0x73, 0x7b, 0xe7, 0x46, 0xfd, 0x83, 0xc5, 0x7f, 0x70, 0x38, 0x08, 0x87, 0x36, 0x83, 0x6a, 0x29, 0xc5, 0x8a, 0x0e, 0x88, 0x4f, 0x1c, 0x9e, 0x98, 0x45, 0x94, 0xbc, 0x1e, 0xfe, 0xa4, 0x1c, 0x9f, 0xa4, 0x20, 0xdd, 0x29, 0xb2, 0x70, 0x2d, 0xe8, 0x78, 0x2e, 0xcc, 0x73, 0x70, 0xe8, 0x5a, 0x3c, 0xc1, 0x7a, 0x40, 0xe8, 0x39, 0x56, 0x80, 0x7b, 0xe4, 0xd9, 0xda, 0x63, 0xd9, 0x77, 0x34, 0xc1, 0xdc, 0x70, 0x53, 0x73, 0x98, 0xab, 0x7b, 0x78, 0xcf, 0x71, 0xb9, 0x99, 0xcf, 0x7f, 0x1b, 0x70, 0xc5, 0x88, 0xf2, 0x84, 0x0a, 0x70, 0x11, 0x77, 0xbb, 0x87, 0xcc, 0x6f, 0x80, 0x66, 0xab, 0x8b, 0x60, 0x70, 0x90, 0x56, 0x4c, 0x8e, 0x4c, 0x73, 0xac, 0x47, 0x66, 0x91, 0x26, 0x77, 0x56, 0x38, 0xa3, 0x93, 0x94, 0x7b, 0xae, 0x2b, 0x3e, 0x96, 0xa1, 0x81, 0x15, 0x1e, 0xba, 0xa3, 0x70, 0x8c, 0x61, 0x20, 0x02, 0xae, 0xa7, 0x97, 0xbd, 0x21, 0x8a, 0x2b, 0xc6, 0x30, 0x4a, 0xd1, 0x86, 0x3a, 0x20, 0x60, 0xcb, 0xec, 0xae, 0x53, 0x24, 0x6b, 0x06, 0xec, 0x32, 0x6c, 0x04, 0x6d, 0xfe, 0xdc, 0x2b, 0x77, 0xdf, 0x6b, 0xde, 0xc3, 0xf0, 0x81, 0x74, 0x6b, 0x08, 0xad, 0xc5, 0x89, 0x0f, 0x6a, 0x1a, 0x9c, 0x70, 0x8c, 0xfd, 0x68, 0xfd, 0x8b, 0xe6, 0x91, 0x98, 0x67, 0x47, 0x79, 0x06, 0x95, 0xa1, 0x66, 0xc4, 0x67, 0xac, 0x99, 0x01, 0x67, 0xde, 0x57, 0x08, 0x9b, 0x57, 0x6a, 0x77, 0x47, 0xd1, 0x9d, 0x93, 0x6d, 0xac, 0x38, 0xd9, 0x9f, 0xbe, 0x72, 0x95, 0x2c, 0x91, 0xa2, 0x22, 0x79, 0x3f, 0x20, 0xf8, 0xad, 0xc6, 0x81, 0xaf, 0x1f, 0xe8, 0xb7, 0x9a, 0x8d, 0x14, 0x20, 0x17, 0x2b, 0xdb, 0x30, 0x39, 0xd1, 0x8c, 0x2d, 0x31, 0x2e, 0xbd, 0xd2, 0xa9, 0x6e, 0xfb, 0x5a, 0x90, 0xf1, 0x12, 0x81, 0x1e, 0x62, 0xb7, 0xdf, 0xe4, 0x8b, 0xc7, 0x61, 0xf6, 0xca, 0x94, 0x92, 0x47, 0x62, 0x6f, 0xb4, 0x09, 0x96, 0xda, 0x62, 0x34, 0xa1, 0x7b, 0x9b, 0x1e, 0x60, 0x9a, 0x8e, 0xc7, 0x9f, 0x37, 0x5d, 0xd3, 0x7b, 0x01, 0xa2, 0x72, 0x5c, 0xa2, 0x68, 0xfa, 0xa5, 0x8d, 0x5d, 0x4a, 0x58, 0xfb, 0xa7, 0xea, 0x5f, 0x8e, 0x48, 0xaf, 
0xaa, 0x04, 0x62, 0x38, 0x38, 0xa9, 0xab, 0xa4, 0x65, 0xcf, 0x2b, 0xe5, 0xac, 0xe0, 0x6b, 0xdc, 0x1f, 0xa3, 0xb6, 0x47, 0x75, 0x97, 0x1d, 0x3f, 0xbf, 0x0b, 0x81, 0xc0, 0x1b, 0xf4, 0x2d, 0x0a, 0x2e, 0x7c, 0xd2, 0x77, 0x2d, 0x63, 0x2e, 0x9a, 0xd2, 0xb8, 0x86, 0x0d, 0x4a, 0x77, 0xf5, 0x27, 0x9e, 0x31, 0x52, 0x09, 0xee, 0x75, 0xa1, 0xb3, 0x57, 0x08, 0xd3, 0xea, 0xa4, 0x17, 0x59, 0x45, 0xbc, 0x2c, 0xa5, 0x38, 0x59, 0x73, 0xa8, 0x38, 0xa9, 0x53, 0x56, 0x54, 0x92, 0x99, 0xac, 0xef, 0x53, 0x13, 0x7d, 0x77, 0xaf, 0x72, 0x51, 0x5c, 0x6b, 0xcb, 0xb1, 0xeb, 0x50, 0xb5, 0x5a, 0xc3, 0xb4, 0xcb, 0x50, 0xab, 0x49, 0x78, 0xb7, 0x2c, 0x53, 0x37, 0x37, 0xcb, 0xb8, 0xe3, 0x55, 0x5f, 0x28, 0xc4, 0xb9, 0x7f, 0x5d, 0x2d, 0x1c, 0xe8, 0xc3, 0x58, 0x6a, 0x60, 0x1b, 0xd2, 0xc6, 0x20, 0x76, 0x30, 0x1b, 0x8c, 0x2c, 0xc1, 0x2f, 0x40, 0xd0, 0xd2, 0x81, 0x76, 0x35, 0xf2, 0xed, 0x9c, 0x99, 0x20, 0x36, 0x2e, 0xf1, 0x03, 0xb2, 0x70, 0x42, 0xb1, 0xec, 0x75, 0xba, 0x7c, 0x4a, 0x10, 0xe1, 0xae, 0xb8, 0x89, 0x4c, 0x6d, 0xc8, 0xa7, 0xb6, 0x70, 0x4d, 0x1c, 0xb1, 0x60, 0xb9, 0x59, 0x48, 0xf7, 0x98, 0x69, 0xbb, 0x97, 0x45, 0x14, 0x81, 0xc9, 0xbd, 0xd3, 0x42, 0x35, 0x6f, 0xbf, 0xc0, 0x56, 0x3f, 0x8e, 0x5e, 0x24, 0xc2, 0x8b, 0x3c, 0x41, 0x4a, 0xc5, 0xc6, 0x1d, 0x3a, 0x42, 0x35, 0x57, 0xc8, 0x3a, 0x3d, 0x16, 0x23, 0x3d, 0xc9, 0x5b, 0x4f, 0x48, 0x22, 0xe0, 0xca, 0x26, 0x5d, 0x20, 0x22, 0x61, 0xca, 0x5d, 0x6a, 0x14, 0x20, 0x29, 0x87, 0x72, 0x2d, 0x66, 0xe9, 0x50, 0x95, 0x28, 0x2e, 0x0e, 0xe6, 0xf9, 0xa7, 0x2a, 0x2f, 0xcf, 0xe6, 0x05, 0xb2, 0x7f, 0x34, 0x85, 0xe3, 0xdd, 0xd1, 0x25, 0x3a, 0xed, 0xf1, 0x25, 0xcf, 0x13, 0x3b, 0x24, 0xd9, 0x0e, 0xcc, 0x39, 0x3a, 0xfc, 0xbe, 0x49, 0xcc, 0x68, 0x38, 0x5d, 0xa3, 0x55, 0xcc, 0xcc, 0x35, 0x53, 0x8b, 0x7f, 0xcd, 0x3b, 0x31, 0xd8, 0x75, 0xfa, 0xce, 0xb7, 0x2e, 0x01, 0x62, 0x5c, 0xd1, 0x69, 0x28, 0x28, 0x4d, 0x2b, 0xd5, 0x00, 0x23, 0xea, 0x36, 0xcc, 0xcd, 0xb4, 0x2a, 0xf0, 0x2a, 0xf0, 0xcc, 0x8c, 0x2f, 0x2f, 0x29, 0x1b, 0xcc, 0x99, 0x4b, 0x9e, 0x25, 0xaf, 0xcc, 0x5f, 0x5a, 
0x73, 0x24, 0x8e, 0x97, 0xa2, 0x2b, 0x86, 0xe2, 0xb8, 0xa4, 0x7f, 0x2c, 0x55, 0xe1, 0xc4, 0xb1, 0xd8, 0x2c, 0xe6, 0xe1, 0x40, 0xc5, 0xd9, 0x2d, 0x8d, 0xe4, 0x27, 0xd4, 0x33, 0x2c, 0x55, 0xe8, 0x08, 0xd7, 0xbb, 0x2e, 0x5a, 0xda, 0x3c, 0xd8, 0x8b, 0x2c, 0xce, 0xc4, 0xe4, 0xd8, 0xa5, 0x2a, 0x26, 0xa9, 0xac, 0xd7, 0x6f, 0x28, 0x73, 0x8f, 0x9b, 0xd4, 0x2d, 0x27, 0xba, 0x79, 0x4a, 0xd2, 0xb4, 0x26, 0xf0, 0x65, 0x21, 0xd2, 0x98, 0x25, 0x54, 0x50, 0x1b, 0xd3, 0xd9, 0x23, 0x52, 0x35, 0xee, 0xce, 0x8b, 0x2a, 0x66, 0x2c, 0x75, 0xcd, 0x6d, 0x2c, 0x25, 0x2a, 0xd8, 0xcc, 0xd0, 0x2d, 0x1f, 0x29, 0xfa, 0xcd, 0x48, 0x48, 0x67, 0x26, 0xae, 0xa3, 0x60, 0x2a, 0xe4, 0xdf, 0xf8, 0xaf, 0xe2, 0x2b, 0x0c, 0xdf, 0x24, 0xb8, 0x6b, 0x2b, 0x43, 0xe0, 0x0f, 0xca, 0xf1, 0x29, 0x43, 0xe3, 0x6d, 0xd5, 0xde, 0x2b, 0x35, 0xdc, 0x66, 0xd7, 0xea, 0x29, 0xd2, 0xd6, 0x93, 0xd7, 0xb2, 0x29, 0x18, 0xc2, 0x92, 0xd7, 0xd2, 0x27, 0x23, 0xaa, 0xd1, 0xd6, 0x68, 0x26, 0x7b, 0x90, 0x34, 0xd3, 0x95, 0x26, 0xa8, 0x7b, 0x8c, 0xd2, 0x57, 0x26, 0x80, 0x67, 0xf4, 0xd2, 0x7f, 0x26, 0x0e, 0x57, 0x03, 0xd3, 0xae, 0x22, 0xf0, 0x3c, 0x8a, 0xce, 0xe3, 0x29, 0x6f, 0x2d, 0x19, 0xcd, 0xf2, 0x2a, 0xfb, 0x2b, 0xb0, 0xcd, 0x5a, 0x2b, 0xfa, 0x2a, 0xd1, 0xcc, 0xf1, 0x2c, 0xac, 0x2a, 0x3a, 0x1c, 0x9f, 0xc4, 0x6f, 0xd9, 0x49, 0x21, 0xac, 0xb1, 0x00, 0xb5, 0x4f, 0x21, 0xe9, 0xb1, 0x7b, 0xb4, 0x59, 0x23, 0x62, 0xb7, 0x1e, 0xb2, 0x52, 0x23, 0x18, 0xba, 0x6d, 0xb1, 0x75, 0x23, 0x9d, 0xba, 0xcb, 0xab, 0x7e, 0x27, 0x2d, 0xb5, 0x06, 0xa1, 0xc4, 0x28, 0x2e, 0xad, 0x32, 0x97, 0x43, 0x2a, 0x52, 0xaf, 0xbc, 0x8d, 0xf1, 0x2b, 0x1d, 0xb1, 0x55, 0x88, 0x14, 0x29, 0xa3, 0xa8, 0xf0, 0x76, 0x30, 0x26, 0xaf, 0xa9, 0x59, 0x68, 0x33, 0x26, 0x06, 0xcb, 0x0c, 0x5b, 0x53, 0x22, 0x67, 0xc9, 0xe7, 0x2c, 0x29, 0x25, 0x8e, 0xc9, 0xd4, 0x29, 0xfc, 0x27, 0x9f, 0xc9, 0xc7, 0x28, 0xaa, 0x29, 0x12, 0xc9, 0xbd, 0x27, 0xc7, 0x1e, 0xc7, 0xc2, 0xc2, 0xd9, 0x32, 0x1c, 0x8c, 0xc4, 0x50, 0xd9, 0x08, 0x20, 0xac, 0xb1, 0xb0, 0xb7, 0x61, 0x21, 0xdd, 0xb2, 0xa9, 0xb5, 0xb1, 
0x1f, 0x3b, 0xbc, 0x13, 0xb5, 0xd6, 0x22, 0xdc, 0xbc, 0x35, 0xb1, 0x9a, 0x25, 0x19, 0xba, 0x59, 0xa7, 0xcd, 0x27, 0xa1, 0xb2, 0x11, 0x9c, 0x91, 0x2a, 0x4c, 0xb0, 0x54, 0x90, 0x44, 0x2b, 0x24, 0xb1, 0xcf, 0x88, 0x4f, 0x29, 0x5b, 0xaa, 0x5b, 0x74, 0x41, 0x26, 0x77, 0xab, 0x62, 0x67, 0xbd, 0x1d, 0x8d, 0xc7, 0x09, 0x50, 0x65, 0x22, 0xe8, 0xc7, 0xb3, 0x2a, 0xa9, 0x26, 0xdd, 0xc8, 0x3d, 0x28, 0x5b, 0x29, 0x21, 0xc8, 0x87, 0x27, 0x27, 0x2a, 0x96, 0xc8, 0xb5, 0x26, 0x6a, 0x1b, 0x2b, 0xbc, 0x9e, 0xde, 0x11, 0x1c, 0xa5, 0xc2, 0xed, 0xdb, 0x4c, 0x1c, 0x68, 0xc4, 0x09, 0xd8, 0x98, 0x1b, 0x5e, 0xca, 0xb7, 0xd6, 0x54, 0x22, 0x90, 0xb5, 0xab, 0xb7, 0x9d, 0x1f, 0x63, 0xbf, 0x8f, 0xb8, 0x56, 0x22, 0x48, 0xc1, 0x33, 0xb5, 0x6d, 0x26, 0x1a, 0xbc, 0x14, 0xa7, 0x2a, 0x29, 0x0f, 0xb9, 0xbe, 0x9a, 0x3c, 0x2a, 0x1c, 0xb6, 0xa5, 0x8a, 0xd6, 0x27, 0xf5, 0xb3, 0xa1, 0x78, 0x6f, 0x20, 0xee, 0xb1, 0x7b, 0x5d, 0x3b, 0x18, 0xa0, 0xb1, 0xcb, 0x3c, 0xc5, 0x22, 0x7c, 0xba, 0xab, 0x25, 0x7f, 0x28, 0x69, 0xc0, 0xe5, 0x23, 0xf0, 0x2b, 0x88, 0xc6, 0x90, 0x24, 0xda, 0x4b, 0xcd, 0xc6, 0xe3, 0x1d, 0x89, 0x18, 0xe1, 0xb6, 0xdb, 0xe1, 0xf8, 0x1a, 0x5b, 0xbb, 0xce, 0xde, 0xbd, 0x1c, 0x15, 0xc0, 0x74, 0xdb, 0xd3, 0x1c, 0x15, 0xc3, 0x56, 0xd7, 0xab, 0x1a, 0x11, 0xc7, 0xf9, 0xd2, 0xf1, 0x1c, 0x6e, 0xc3, 0xb7, 0xc4, 0xbc, 0x28, 0xce, 0xba, 0xfb, 0xb1, 0xda, 0x31, 0x92, 0xb2, 0xad, 0xa0, 0xa6, 0x34, 0xe1, 0xae, 0x88, 0x92, 0x79, 0x37, 0x9d, 0xab, 0x79, 0x83, 0xed, 0x37, 0x02, 0xa8, 0x86, 0x70, 0x57, 0x33, 0x91, 0xa5, 0xea, 0x56, 0xe2, 0x2d, 0x0a, 0xa7, 0x98, 0x36, 0x0a, 0x2d, 0xda, 0xa7, 0xa5, 0x1a, 0x9a, 0x42, 0x3d, 0xbb, 0xd5, 0x1a, 0xa4, 0x51, 0x50, 0xc0, 0xb3, 0x19, 0xb9, 0x72, 0xfa, 0xc5, 0x1d, 0x18, 0x43, 0x1d, 0x1e, 0xb1, 0x3c, 0xe1, 0x9f, 0x1c, 0x40, 0xb2, 0xf8, 0xe1, 0x75, 0x1a, 0x17, 0xba, 0x2a, 0xe0, 0xfd, 0x1b, 0x97, 0xbe, 0x42, 0xdc, 0xb6, 0x1a, 0x1c, 0xc1, 0x1f, 0xd5, 0xe1, 0x29, 0x90, 0xb7, 0xd9, 0xbf, 0x43, 0x35, 0xc8, 0xae, 0x0b, 0xaa, 0x94, 0x3c, 0x9f, 0xa7, 0xad, 0x9a, 0x1f, 0x41, 0xaf, 0xa2, 
0xa8, 0x8a, 0xe5, 0x44, 0xf5, 0xa0, 0x40, 0x7b, 0xcb, 0x45, 0x2e, 0x9d, 0x9c, 0x67, 0x32, 0x44, 0x34, 0x9c, 0x1c, 0x4e, 0x96, 0x46, 0xd8, 0x9c, 0x8e, 0x35, 0x39, 0x4b, 0x4c, 0x9e, 0x31, 0x20, 0xff, 0x52, 0x62, 0xa1, 0x5f, 0x0e, 0xa2, 0x68, 0x09, 0xab, 0x6f, 0x13, 0x1c, 0x74, 0xac, 0xb0, 0x6f, 0x16, 0xd9, 0x1f, 0x03, 0xa7, 0x1e, 0xe1, 0xf6, 0x20, 0x24, 0xac, 0x22, 0xe2, 0x06, 0x1f, 0x12, 0xb2, 0x6a, 0xe3, 0x9c, 0x1b, 0x2b, 0xba, 0xf1, 0xe6, 0xfe, 0x29, 0x9b, 0xb3, 0x31, 0xd1, 0x62, 0x37, 0xe7, 0xaa, 0x57, 0xba, 0xad, 0x43, 0xa6, 0xa1, 0x7b, 0xa3, 0x49, 0x4a, 0xa3, 0x9c, 0xf7, 0x94, 0xf2, 0x51, 0x8d, 0x99, 0x05, 0x85, 0xac, 0x54, 0x08, 0x96, 0xfc, 0x74, 0xbc, 0x56, 0xde, 0x95, 0x45, 0x60, 0xba, 0x5a, 0xbc, 0x95, 0x3c, 0x4b, 0x43, 0x5f, 0xd9, 0x96, 0x9a, 0x38, 0x23, 0x64, 0x91, 0x98, 0xb2, 0x28, 0x3d, 0x6a, 0xd8, 0x9a, 0x26, 0x16, 0x93, 0x7a, 0x7e, 0xa7, 0x33, 0x1a, 0x00, 0x8d, 0x19, 0xb8, 0xe1, 0x16, 0x43, 0x23, 0xd0, 0x9b, 0xbb, 0xe1, 0x38, 0x24, 0x53, 0x9f, 0xcf, 0xe1, 0xca, 0x25, 0xab, 0xa5, 0xf4, 0xe2, 0xc5, 0x29, 0xa6, 0xae, 0x88, 0xe5, 0x96, 0x39, 0x7b, 0xa5, 0xc8, 0xce, 0x5e, 0x47, 0x6a, 0x9d, 0x71, 0xb8, 0x1d, 0x51, 0xd1, 0x97, 0x39, 0xa2, 0xdb, 0x59, 0xc6, 0x93, 0xda, 0x91, 0x5d, 0x5f, 0x56, 0x90, 0xd8, 0x81, 0x9c, 0x64, 0x02, 0x8e, 0xda, 0x70, 0xd0, 0x68, 0x31, 0x8e, 0x74, 0x5d, 0xc9, 0x6c, 0x35, 0x8f, 0x87, 0x4c, 0x36, 0x71, 0xce, 0x91, 0xc8, 0x3a, 0xdc, 0x76, 0x90, 0x94, 0x85, 0x2c, 0xb7, 0x79, 0x87, 0x97, 0x91, 0x1c, 0x2d, 0x89, 0x92, 0xa1, 0xa0, 0x1d, 0x5f, 0x96, 0x0d, 0xaf, 0x5a, 0x19, 0xfc, 0x25, 0xe6, 0x90, 0xe1, 0xe3, 0x22, 0x28, 0x86, 0x94, 0x63, 0xe2, 0xa5, 0x2c, 0xb0, 0x99, 0xea, 0xe3, 0x5f, 0x3a, 0xac, 0xa0, 0x6f, 0xe3, 0x91, 0x4a, 0x05, 0x98, 0x95, 0xcc, 0x3a, 0x56, 0xab, 0x91, 0x11, 0xb6, 0xc1, 0x60, 0x78, 0x8d, 0x64, 0xa3, 0x33, 0x68, 0x9b, 0x8a, 0xa0, 0x91, 0x20, 0x6f, 0x13, 0x88, 0x7e, 0x80, 0x91, 0x74, 0x4e, 0x87, 0x82, 0x6e, 0xe2, 0x78, 0x3c, 0x88, 0x46, 0x5d, 0xc7, 0x7b, 0xcf, 0x8a, 0x61, 0x4d, 0xb1, 0x7f, 0x47, 0x8d, 0x41, 0x3e, 0x10, 
0x83, 0x2d, 0x90, 0x43, 0x2e, 0xd2, 0x86, 0x7f, 0x94, 0x50, 0x1f, 0xfc, 0x91, 0xc2, 0x9f, 0x5f, 0x1d, 0x87, 0x9f, 0xe2, 0xab, 0xfa, 0x1d, 0xd9, 0x28, 0x9b, 0x7f, 0xdf, 0xe6, 0x2c, 0x2c, 0x94, 0x85, 0xb3, 0xe6, 0x04, 0x35, 0x66, 0x8c, 0xad, 0xe4, 0xfc, 0x4d, 0x9a, 0x92, 0xbb, 0xe2, 0x8d, 0x5b, 0x78, 0x8c, 0x7d, 0xcc, 0x0b, 0x67, 0x34, 0x86, 0x70, 0xb6, 0x97, 0x71, 0x11, 0x83, 0xad, 0xa3, 0xb4, 0x79, 0x11, 0x82, 0x49, 0x91, 0xf9, 0x80, 0x14, 0x81, 0x1a, 0x81, 0x50, 0x83, 0x2f, 0x80, 0xad, 0x6f, 0x38, 0x86, 0x75, 0x81, 0xc0, 0x5e, 0x87, 0x89, 0xa2, 0x84, 0x0c, 0x4f, 0x06, 0x8c, 0xa3, 0x87, 0x88, 0x40, 0x43, 0x8f, 0xcd, 0x8b, 0x8e, 0x31, 0x4d, 0x92, 0x84, 0x8f, 0xbd, 0x23, 0x47, 0x9c, 0x51, 0x98, 0xc5, 0x1f, 0x6c, 0xa9, 0xbc, 0xa6, 0x23, 0x21, 0x0c, 0x2d, 0x44, 0x72, 0xdd, 0xe8, 0x4e, 0x32, 0xec, 0x74, 0xad, 0xe6, 0xd2, 0x49, 0xc8, 0x7d, 0xf1, 0xe7, 0x01, 0x61, 0x49, 0x84, 0x36, 0xe3, 0x46, 0x6e, 0x46, 0x80, 0x8a, 0xcc, 0xf1, 0x7a, 0x4f, 0x7c, 0x1f, 0xb5, 0xbf, 0x83, 0x22, 0x7b, 0x2c, 0xa4, 0x6c, 0x89, 0x33, 0x7a, 0x80, 0x93, 0x40, 0x8e, 0x0d, 0x79, 0x97, 0x81, 0x83, 0x91, 0xdd, 0x78, 0xe6, 0x6f, 0xeb, 0x95, 0x1f, 0x79, 0x9c, 0x5e, 0xe8, 0x98, 0x38, 0x7b, 0xe2, 0x4f, 0x5a, 0x9a, 0x99, 0x7f, 0x3e, 0x40, 0xb9, 0x9c, 0xd1, 0x83, 0xc7, 0x32, 0xde, 0x9f, 0x3d, 0x88, 0x1b, 0x24, 0xfe, 0xa8, 0x23, 0x90, 0xa8, 0x20, 0x2a, 0xb0, 0x8a, 0x9a, 0xf0, 0x22, 0x59, 0x33, 0x20, 0x5f, 0xe7, 0xe9, 0x43, 0x46, 0xc8, 0x67, 0xbf, 0xec, 0xac, 0x5c, 0xf6, 0x6f, 0xc3, 0xea, 0x9c, 0x77, 0x82, 0x76, 0xe2, 0xe6, 0x58, 0x82, 0x0c, 0x75, 0xe4, 0xcf, 0xde, 0x8b, 0xe3, 0x74, 0x9d, 0xba, 0x3b, 0x93, 0x82, 0x73, 0xd7, 0xa7, 0x92, 0x97, 0x7e, 0x73, 0x3e, 0x96, 0x57, 0x9c, 0x00, 0x71, 0x52, 0x82, 0xd2, 0x9f, 0xc8, 0x70, 0x4c, 0x70, 0xe2, 0xa2, 0xad, 0x70, 0x90, 0x5f, 0xc1, 0xa4, 0xfe, 0x72, 0xa7, 0x4f, 0xf7, 0xa7, 0x13, 0x75, 0x94, 0x40, 0xdd, 0xa9, 0x49, 0x7a, 0x50, 0x33, 0x28, 0xaa, 0xf4, 0x7f, 0x6b, 0x26, 0x10, 0xb1, 0x57, 0x85, 0x25, 0x1f, 0x9d, 0xb9, 0xf8, 0x90, 0x7b, 0x20, 0x6d, 0x2c, 0xfe, 0x2e, 
0x97, 0xd2, 0x7c, 0x60, 0x2d, 0x57, 0x80, 0xf0, 0xd1, 0x76, 0xc4, 0x60, 0xe0, 0xef, 0xa7, 0x8c, 0x71, 0x6b, 0xdd, 0xea, 0x7d, 0x96, 0x9f, 0x6c, 0x15, 0xd5, 0x20, 0x9d, 0xdd, 0x6c, 0x9f, 0xc0, 0x47, 0xa1, 0xf7, 0x6c, 0x8f, 0xac, 0xfc, 0xa5, 0xdf, 0x6a, 0xf1, 0x99, 0x53, 0xa9, 0xfb, 0x67, 0xfe, 0x84, 0xaa, 0xac, 0xec, 0x66, 0x5d, 0x72, 0x5e, 0xaf, 0x58, 0x66, 0x67, 0x61, 0x94, 0xb1, 0x77, 0x68, 0x05, 0x51, 0x01, 0xb3, 0xa3, 0x6a, 0x67, 0x40, 0x4c, 0xb4, 0xdb, 0x6e, 0x98, 0x32, 0x03, 0xb5, 0xd6, 0x73, 0x96, 0x24, 0x6f, 0xbb, 0x97, 0x7a, 0x0a, 0x1c, 0x4d, 0xc3, 0x8f, 0x85, 0x6d, 0x1b, 0xca, 0x2d, 0x17, 0x2e, 0x86, 0xd2, 0x83, 0x78, 0x96, 0x4a, 0x1e, 0xf6, 0xff, 0x8b, 0x3c, 0x51, 0xaa, 0xf3, 0x23, 0xa1, 0x5f, 0x5b, 0xfb, 0xec, 0x12, 0xab, 0x3a, 0x63, 0x33, 0xdb, 0xfd, 0xaf, 0xcd, 0x63, 0x9e, 0xc8, 0x04, 0xb1, 0x0d, 0x63, 0xb8, 0xb3, 0xbf, 0xb4, 0x7a, 0x60, 0xa1, 0x9d, 0x2d, 0xb7, 0xfd, 0x5c, 0xf2, 0x87, 0x17, 0xb9, 0xf7, 0x5b, 0x63, 0x74, 0x89, 0xbb, 0xe7, 0x5a, 0x79, 0x63, 0x08, 0xbe, 0x71, 0x59, 0xe8, 0x51, 0x1d, 0xc0, 0x8f, 0x5b, 0x22, 0x3e, 0xa4, 0xc1, 0xe7, 0x5d, 0x4e, 0x2d, 0x90, 0xc2, 0x8d, 0x64, 0x5a, 0x20, 0xc3, 0xc6, 0xe3, 0x6f, 0x52, 0x1e, 0x2f, 0xc8, 0x38, 0x7a, 0x69, 0x1d, 0x35, 0x2d, 0x30, 0x2e, 0x74, 0xd2, 0x8b, 0x8d, 0x54, 0x36, 0x17, 0xf3, 0x7d, 0xa0, 0xf4, 0x3f, 0x2d, 0xf0, 0xf2, 0xb0, 0x5a, 0x4b, 0xc2, 0xec, 0xe9, 0xc6, 0x7e, 0x54, 0xa2, 0xeb, 0xf0, 0xc1, 0xd1, 0x57, 0x1a, 0xd3, 0x5a, 0xc1, 0xdc, 0x57, 0x79, 0xbc, 0x90, 0xc3, 0xfe, 0x53, 0x69, 0xa3, 0x5d, 0xc5, 0xc2, 0x4f, 0x84, 0x8b, 0xc7, 0xc7, 0x68, 0x4c, 0x80, 0x78, 0x7f, 0xc9, 0x34, 0x49, 0xa8, 0x66, 0x4c, 0xcb, 0xbe, 0x46, 0xa6, 0x52, 0x3e, 0xce, 0x8c, 0x44, 0xb5, 0x3c, 0xc3, 0xd0, 0xca, 0x47, 0x14, 0x29, 0x5b, 0xcd, 0x39, 0x56, 0x0d, 0x25, 0x65, 0xcb, 0xff, 0x63, 0xb5, 0x22, 0xbc, 0xcb, 0x7e, 0x6e, 0x0c, 0x21, 0x86, 0x8d, 0x2a, 0x2f, 0x44, 0xea, 0x72, 0x9a, 0x72, 0x31, 0x1d, 0xe8, 0xa7, 0xa9, 0xae, 0x32, 0xde, 0xe7, 0x3e, 0xc1, 0x58, 0x3a, 0x2f, 0xee, 0x0d, 0xd3, 0x34, 0x42, 0x1c, 0xef, 0x37, 
0xd6, 0x66, 0x47, 0xbe, 0xe1, 0xbd, 0xd3, 0x88, 0x47, 0xa1, 0xc6, 0xb3, 0xd5, 0x39, 0x43, 0xe0, 0xac, 0x90, 0xd5, 0xb4, 0x40, 0x0b, 0x93, 0x8e, 0xd5, 0xd5, 0x3c, 0x44, 0x7d, 0xed, 0xd7, 0x42, 0x38, 0xe1, 0x6a, 0xba, 0xda, 0xcf, 0x33, 0x13, 0x53, 0xed, 0xd9, 0x08, 0x2e, 0xda, 0x3a, 0xa7, 0xd0, 0x7c, 0x2f, 0x8e, 0x2c, 0xfa, 0xcf, 0x60, 0x41, 0xd6, 0x28, 0xc2, 0xce, 0x73, 0x54, 0xb2, 0x26, 0x99, 0xcd, 0xea, 0x5f, 0xa5, 0x25, 0x43, 0x9b, 0x37, 0x2d, 0x9f, 0xe4, 0xfe, 0xa6, 0xbc, 0x2e, 0x14, 0xe3, 0xc2, 0xb4, 0x9a, 0x2f, 0x2f, 0xe3, 0xcc, 0xc7, 0x51, 0x32, 0xa1, 0xe5, 0x59, 0xd4, 0x23, 0x33, 0x17, 0xe2, 0x1c, 0xd8, 0x83, 0x34, 0x43, 0xda, 0x55, 0xd9, 0xf3, 0x32, 0xfe, 0xc6, 0xd8, 0xda, 0x48, 0x30, 0xc2, 0xac, 0x4a, 0xd9, 0xf1, 0x2f, 0x04, 0x92, 0xc6, 0xd7, 0xc0, 0x2d, 0x05, 0x7c, 0x8f, 0xd5, 0xe4, 0x2b, 0x80, 0x67, 0xf8, 0xd5, 0x67, 0x2a, 0x01, 0x53, 0x0c, 0xd6, 0xae, 0x27, 0xcf, 0x3b, 0xaa, 0xd0, 0x31, 0x2b, 0xfe, 0x2d, 0xa6, 0xce, 0x95, 0x2d, 0x45, 0x2b, 0xad, 0xcd, 0xb4, 0x2d, 0xff, 0x2a, 0xa1, 0xce, 0xaa, 0x52, 0xd6, 0x27, 0x00, 0xa4, 0xf7, 0x2c, 0x1e, 0xe1, 0x64, 0xb1, 0xb2, 0x2c, 0x8a, 0xe0, 0xd9, 0xc7, 0x1b, 0x2d, 0x04, 0xe3, 0xcf, 0xd0, 0x7a, 0x2b, 0xfc, 0xe6, 0xe6, 0xd6, 0x95, 0x2e, 0xfb, 0xdc, 0x27, 0xd8, 0xe5, 0x2e, 0x30, 0xd7, 0xbb, 0xd8, 0xa2, 0x2d, 0x21, 0xc3, 0xac, 0xd8, 0xe3, 0x2b, 0x58, 0xab, 0x92, 0xd8, 0x63, 0x2a, 0x40, 0x92, 0x2b, 0xd5, 0xa7, 0x29, 0xae, 0x7d, 0x46, 0xd4, 0x3c, 0x29, 0x35, 0x69, 0x8b, 0xd4, 0x5e, 0x28, 0xac, 0x58, 0x7b, 0xd5, 0x89, 0x25, 0xc9, 0x3e, 0x9f, 0xd0, 0x0e, 0x2a, 0x8f, 0x2d, 0xf1, 0xce, 0xd9, 0x2b, 0xd9, 0x2c, 0x55, 0xce, 0x15, 0x2c, 0xae, 0x2b, 0x58, 0xcd, 0x8f, 0x2d, 0x44, 0x2a, 0xab, 0x1c, 0x4f, 0xc7, 0xf3, 0xdb, 0xd1, 0x1c, 0xef, 0xce, 0x5e, 0xda, 0x46, 0x22, 0x0d, 0xb3, 0xd9, 0xb6, 0xfe, 0x1f, 0x68, 0xbe, 0x05, 0xb7, 0xd8, 0x23, 0x73, 0xbe, 0x06, 0xb4, 0xba, 0x24, 0x35, 0xbe, 0xa9, 0xae, 0xc7, 0x27, 0x3f, 0xb9, 0x64, 0xa5, 0xa4, 0x28, 0x52, 0xb6, 0x8c, 0x9e, 0xfe, 0x2a, 0x6a, 0xb1, 0xf9, 0x8e, 0xb6, 0x2b, 0x2c, 0xb2, 
0xcf, 0x89, 0x1d, 0x2e, 0x84, 0xcb, 0x34, 0x85, 0x9f, 0x2d, 0xbc, 0xcb, 0x69, 0x76, 0xc0, 0x28, 0x8b, 0xcc, 0x55, 0x5c, 0xaa, 0x23, 0x36, 0xcc, 0x4e, 0x2c, 0xd4, 0x26, 0x34, 0xcb, 0xb1, 0x2a, 0x81, 0x28, 0x2a, 0xcb, 0x4b, 0x29, 0x18, 0x29, 0x8a, 0xcb, 0x04, 0x28, 0x25, 0x1f, 0x57, 0xc4, 0xd8, 0xdb, 0x43, 0x1c, 0x77, 0xc8, 0x41, 0xdc, 0x0c, 0x1c, 0xef, 0xce, 0x82, 0xda, 0x44, 0x1a, 0x5b, 0xd7, 0xd7, 0xd9, 0xfe, 0x1f, 0x90, 0xc1, 0x62, 0xba, 0x35, 0x23, 0x73, 0xc2, 0x0f, 0xb6, 0xcc, 0x25, 0xf9, 0xbf, 0x92, 0xac, 0x35, 0x27, 0xf1, 0xbc, 0x52, 0xa5, 0xb2, 0x2a, 0x6e, 0xb7, 0x54, 0x95, 0x0a, 0x2b, 0x3f, 0xb4, 0x7d, 0x8a, 0x2e, 0x2e, 0x75, 0xcb, 0x25, 0x84, 0xe2, 0x2b, 0xa2, 0xcb, 0xb9, 0x6d, 0x09, 0x26, 0x49, 0xcb, 0x77, 0x55, 0xaa, 0x24, 0x0e, 0xcb, 0x0e, 0x2b, 0x9a, 0x27, 0xb6, 0xca, 0x9f, 0x29, 0x08, 0x29, 0xce, 0xca, 0x5f, 0x27, 0xad, 0x2b, 0x27, 0xca, 0x37, 0x26, 0xd8, 0x1c, 0x86, 0xc0, 0x19, 0xe0, 0x19, 0x1f, 0x75, 0xc5, 0x26, 0xdc, 0x11, 0x1d, 0xb6, 0xc8, 0x63, 0xdb, 0xb9, 0x1c, 0xef, 0xce, 0xc7, 0xda, 0x40, 0x1b, 0x3d, 0xd7, 0x38, 0xd8, 0x1c, 0x20, 0x20, 0xc8, 0x6b, 0xc0, 0x72, 0x1c, 0xc9, 0xd2, 0x94, 0xc5, 0x34, 0x22, 0x63, 0xcd, 0x7b, 0xb5, 0x5e, 0x27, 0x94, 0xca, 0x98, 0xa6, 0x97, 0x29, 0x75, 0xc7, 0x06, 0x95, 0x1d, 0x27, 0xc7, 0xc4, 0x61, 0x7f, 0xca, 0x21, 0x4d, 0xc2, 0xb9, 0x62, 0x6d, 0x19, 0xb6, 0xc4, 0x0e, 0x3e, 0xcc, 0x26, 0x10, 0xc8, 0x24, 0x28, 0xcd, 0x2a, 0x7d, 0xc8, 0xb3, 0x26, 0x77, 0x40, 0x7f, 0xc9, 0x6d, 0x20, 0xfa, 0x4d, 0x64, 0xc9, 0xe9, 0x20, 0xb9, 0x1a, 0xdf, 0xba, 0x6c, 0xe4, 0x54, 0x1c, 0x14, 0xbe, 0xac, 0xe1, 0xe3, 0x1d, 0xd1, 0xc5, 0x57, 0xdf, 0x9a, 0x1e, 0x1c, 0xc9, 0x5c, 0xdc, 0xbe, 0x1b, 0x2f, 0xd1, 0xe3, 0xdb, 0x70, 0x17, 0xd7, 0xd6, 0x0c, 0xd6, 0xb7, 0x26, 0x14, 0xcb, 0xdf, 0xc1, 0x9d, 0x2f, 0xc9, 0xc4, 0x28, 0xaf, 0xbc, 0x35, 0x46, 0xbf, 0x79, 0x9f, 0x46, 0x38, 0x6f, 0xbc, 0x20, 0x8e, 0x27, 0x38, 0x13, 0xb9, 0x6d, 0x78, 0xaf, 0x34, 0x51, 0xb7, 0x1e, 0x5d, 0x28, 0x31, 0x62, 0xb8, 0x45, 0x3a, 0x53, 0x32, 0xdc, 0xb8, 0xb1, 0x1d, 0xa0, 
0x48, 0x0f, 0xc3, 0x08, 0x1c, 0x88, 0x56, 0x34, 0xc5, 0x8d, 0x1a, 0xed, 0x74, 0x76, 0xc7, 0x9a, 0x19, 0xce, 0x1f, 0x42, 0xb3, 0x64, 0xe3, 0xf0, 0x1c, 0xfd, 0xb7, 0xee, 0xe6, 0x05, 0x1d, 0x83, 0xbf, 0x20, 0xe5, 0x27, 0x1e, 0xe3, 0xc5, 0x63, 0xe3, 0x61, 0x1d, 0xd9, 0xce, 0x5c, 0xe3, 0x2e, 0x2a, 0x69, 0xc6, 0xdc, 0xce, 0x14, 0x36, 0xc8, 0xbd, 0xe0, 0xb8, 0xf6, 0x40, 0x9c, 0xb5, 0x7c, 0xa5, 0xfc, 0x47, 0x05, 0xb1, 0x3e, 0x96, 0x78, 0x4c, 0xe1, 0xad, 0x22, 0x86, 0x8d, 0x4d, 0xa1, 0xab, 0x3e, 0x70, 0xc5, 0x4d, 0x63, 0xaa, 0x54, 0x56, 0xea, 0x4e, 0xfa, 0xab, 0x1a, 0x3a, 0x59, 0x54, 0x5e, 0xad, 0xdd, 0x27, 0x8b, 0x5f, 0x7a, 0xb0, 0xfb, 0x16, 0xc9, 0x6b, 0xd6, 0xb2, 0x9e, 0x14, 0x17, 0x80, 0x8e, 0xc4, 0x63, 0x15, 0x82, 0x24, 0x20, 0xa8, 0x52, 0xe1, 0x7a, 0x23, 0xd8, 0xaf, 0x09, 0xe3, 0x63, 0x24, 0xa3, 0xb4, 0xb2, 0xe5, 0x1c, 0x25, 0xb8, 0xbd, 0xbe, 0xe8, 0x2d, 0x2e, 0xef, 0xbf, 0x92, 0xde, 0x40, 0x3c, 0xa2, 0xb7, 0x37, 0xc7, 0xc2, 0x4a, 0x29, 0xad, 0xdd, 0xae, 0xcd, 0x52, 0x3e, 0xa8, 0xa8, 0x9f, 0xa7, 0x59, 0xb1, 0xa4, 0x88, 0x8f, 0x5e, 0x5d, 0x66, 0xa2, 0x28, 0x7e, 0x51, 0x60, 0x63, 0xa0, 0xdf, 0x6a, 0x4d, 0x64, 0x21, 0xa0, 0xf1, 0x54, 0xde, 0x69, 0xba, 0xa2, 0x2a, 0x40, 0x33, 0x70, 0x77, 0xa3, 0xde, 0x30, 0xd7, 0x76, 0x61, 0xa6, 0x86, 0x1f, 0xf3, 0x7d, 0x6b, 0xaf, 0x26, 0x16, 0xb2, 0x91, 0x57, 0xbe, 0xb2, 0x15, 0xc8, 0x26, 0xe7, 0x9c, 0xa9, 0xe1, 0xbf, 0x28, 0x14, 0xa2, 0x54, 0xe2, 0x9d, 0x2a, 0xd9, 0xa8, 0xb9, 0xe3, 0xec, 0x33, 0x38, 0xaf, 0x7a, 0xe4, 0x90, 0x42, 0xf8, 0xae, 0x7b, 0xd6, 0xbb, 0x4f, 0x82, 0xa7, 0x7e, 0xc2, 0x37, 0x5a, 0x39, 0xa1, 0xa0, 0xad, 0x72, 0x62, 0x90, 0x9d, 0xf6, 0x9b, 0x7b, 0x68, 0x9e, 0x9b, 0x0f, 0x8b, 0x82, 0x6d, 0x99, 0x99, 0x27, 0x7a, 0x60, 0x71, 0xdf, 0x98, 0xbf, 0x66, 0xf9, 0x75, 0xd4, 0x99, 0x6e, 0x54, 0xc1, 0x7a, 0xdd, 0x9b, 0x62, 0x43, 0xd3, 0x7f, 0xa6, 0x9e, 0x36, 0x34, 0x4a, 0x84, 0xf9, 0xa0, 0x97, 0x24, 0xb4, 0x8c, 0x98, 0xa9, 0x8c, 0x1a, 0x7d, 0x98, 0xac, 0xb5, 0xdd, 0x1a, 0x20, 0x28, 0xe5, 0x93, 0x0a, 0xe3, 0x4b, 0x2c, 0x0f, 0x97, 
0x15, 0xe3, 0x27, 0x31, 0x25, 0x9c, 0xb1, 0xe4, 0x0e, 0x43, 0x2e, 0xa2, 0xf4, 0xe3, 0xbd, 0x54, 0x0e, 0xa0, 0x27, 0xd3, 0x8f, 0x60, 0x31, 0x99, 0x55, 0xc0, 0x47, 0x69, 0xb5, 0x96, 0xad, 0xad, 0xb1, 0x71, 0xfd, 0x94, 0x66, 0x9b, 0x20, 0x78, 0xc1, 0x92, 0xb3, 0x8a, 0x76, 0x7d, 0xd2, 0x91, 0xb4, 0x78, 0x6c, 0x81, 0xcc, 0x92, 0x14, 0x66, 0xc7, 0x85, 0x3f, 0x93, 0x94, 0x55, 0xc9, 0x88, 0x71, 0x96, 0x05, 0x46, 0x3b, 0x8b, 0xf6, 0x98, 0xf4, 0x36, 0x45, 0x8f, 0x43, 0x9c, 0x0d, 0x27, 0x42, 0x95, 0x71, 0xa3, 0xd2, 0x1e, 0x12, 0xa3, 0x31, 0xae, 0xb9, 0x1e, 0x5e, 0x2b, 0xbe, 0x82, 0xe5, 0xe6, 0x30, 0x30, 0x2a, 0x88, 0xbf, 0xe6, 0x1b, 0x40, 0x20, 0x8f, 0x72, 0xe4, 0x84, 0x56, 0xca, 0x95, 0x47, 0xe2, 0xa3, 0x65, 0xd2, 0x94, 0x05, 0xd4, 0x14, 0x70, 0x95, 0x8f, 0x65, 0xc0, 0x51, 0x7a, 0xa1, 0x8d, 0x0a, 0xad, 0xe7, 0x82, 0xcc, 0x8b, 0xf9, 0x9c, 0x3f, 0x8a, 0x01, 0x8b, 0x0b, 0x8b, 0x40, 0x8d, 0x3d, 0x8a, 0x9c, 0x78, 0xb6, 0x90, 0x32, 0x8b, 0x27, 0x67, 0x3c, 0x93, 0x34, 0x8c, 0xaf, 0x56, 0x91, 0x95, 0x93, 0x8f, 0xca, 0x47, 0xe4, 0x97, 0xf9, 0x93, 0x84, 0x38, 0x24, 0x9a, 0xbf, 0x96, 0x6a, 0x29, 0x43, 0xa0, 0x93, 0x9d, 0x1f, 0x20, 0x39, 0xad, 0xd9, 0xaa, 0xf1, 0x21, 0xc1, 0x30, 0x3d, 0x74, 0x20, 0xe8, 0x5f, 0x38, 0xde, 0x7a, 0x23, 0xe7, 0xfa, 0x54, 0xc7, 0x80, 0xcc, 0xe5, 0xba, 0x69, 0x47, 0x87, 0xfd, 0xe3, 0xcc, 0x78, 0xfe, 0x88, 0xaa, 0xd5, 0x7c, 0x83, 0xc3, 0x85, 0xbb, 0xc1, 0x2b, 0x8c, 0xde, 0x85, 0x1d, 0xaf, 0x41, 0x93, 0x40, 0x84, 0x4b, 0x9d, 0xd5, 0x98, 0x5e, 0x83, 0x6f, 0x8b, 0xc3, 0x9c, 0x0d, 0x82, 0x8f, 0x79, 0x67, 0x9e, 0xa9, 0x82, 0xbd, 0x67, 0xea, 0xa1, 0x2f, 0x84, 0x63, 0x57, 0xb2, 0xa3, 0x7b, 0x87, 0x7e, 0x48, 0x4e, 0xa5, 0x5c, 0x8a, 0xe1, 0x38, 0x83, 0xa7, 0x44, 0x8e, 0x7a, 0x2a, 0x81, 0xac, 0x9c, 0x94, 0x4c, 0x21, 0x27, 0xb5, 0x2e, 0x9e, 0x61, 0x20, 0xbf, 0x36, 0x7c, 0x63, 0x87, 0xea, 0x53, 0x53, 0x98, 0x6d, 0x53, 0xea, 0xd1, 0x69, 0xbe, 0x73, 0xba, 0xe8, 0xdd, 0x7f, 0x93, 0x7b, 0xcf, 0xe6, 0x6f, 0x8d, 0x9e, 0x7f, 0x08, 0xd8, 0xb9, 0x96, 0x9d, 0x7e, 0x46, 0xc5, 0x5e, 
0x9e, 0x16, 0x7d, 0xd2, 0xb3, 0x0e, 0xa2, 0x25, 0x7d, 0x9d, 0xa1, 0xc9, 0xa6, 0x8e, 0x7b, 0x2a, 0x8d, 0x01, 0xaa, 0x2c, 0x7a, 0x03, 0x7a, 0x21, 0xac, 0x4a, 0x79, 0xe8, 0x68, 0xf1, 0xae, 0x64, 0x7b, 0x48, 0x58, 0x8f, 0xb0, 0x01, 0x7d, 0x88, 0x48, 0x4c, 0xb1, 0xd8, 0x80, 0xc9, 0x38, 0x34, 0xb2, 0xec, 0x84, 0x44, 0x29, 0xeb, 0xb7, 0x4a, 0x89, 0xdc, 0x1f, 0xc7, 0xbe, 0x6b, 0x91, 0xf3, 0x1e, 0xaa, 0x2d, 0x0b, 0x2e, 0xa1, 0xd2, 0x87, 0x6b, 0xe2, 0x5e, 0xc4, 0xef, 0xc3, 0x7e, 0x4f, 0x65, 0xae, 0xef, 0x29, 0x93, 0xc3, 0x71, 0x13, 0xeb, 0x91, 0xa2, 0xd1, 0x76, 0x4c, 0xdf, 0x9d, 0xa7, 0xef, 0x76, 0x2b, 0xcc, 0x03, 0xad, 0x41, 0x76, 0x8c, 0xb8, 0x6b, 0xb0, 0xa2, 0x75, 0x3c, 0xa3, 0xfe, 0xb4, 0x86, 0x71, 0xf1, 0x8e, 0x5e, 0xb7, 0x35, 0x70, 0x86, 0x7b, 0xb0, 0xb8, 0xa8, 0x6f, 0xbb, 0x6a, 0x47, 0xba, 0x7c, 0x70, 0xc1, 0x59, 0x75, 0xbc, 0x2b, 0x72, 0xb7, 0x48, 0x13, 0xbd, 0xab, 0x75, 0x95, 0x37, 0x21, 0xbe, 0x6c, 0x78, 0xe2, 0x27, 0x2a, 0xc1, 0xae, 0x7d, 0x7f, 0x1a, 0x9c, 0xc5, 0x7c, 0x87, 0x2c, 0x1b, 0x65, 0x2d, 0x24, 0x2e, 0x8f, 0xd2, 0x8f, 0x82, 0xa6, 0x52, 0x7e, 0xf5, 0x03, 0x90, 0xa6, 0x58, 0x96, 0xf0, 0x98, 0xa5, 0xb1, 0x62, 0xc5, 0xe9, 0x7e, 0xb6, 0x99, 0x6d, 0x86, 0xe7, 0x36, 0xba, 0x57, 0x6d, 0xa6, 0xd3, 0xb1, 0xbc, 0x7f, 0x6d, 0xbb, 0xbf, 0x69, 0xbf, 0x54, 0x6b, 0x2a, 0xa8, 0x6b, 0xc2, 0x00, 0x67, 0x53, 0x91, 0xaf, 0xc3, 0x54, 0x65, 0x4f, 0x7e, 0x02, 0xc4, 0x57, 0x64, 0x4c, 0x6b, 0xa9, 0xc5, 0xd3, 0x63, 0x9a, 0x59, 0x06, 0xc7, 0x99, 0x64, 0x56, 0x45, 0xb6, 0xc9, 0x60, 0x66, 0xc8, 0x32, 0xe4, 0xca, 0xb3, 0x6a, 0x0d, 0x22, 0x20, 0xca, 0xa5, 0x73, 0xd9, 0x1f, 0xc9, 0xca, 0x58, 0x7e, 0xa4, 0x1e, 0xd2, 0x83, 0xae, 0x36, 0x71, 0xf6, 0x24, 0x93, 0x14, 0x3c, 0x43, 0xf4, 0xbd, 0xa7, 0xa9, 0x48, 0xde, 0xed, 0x7c, 0xb4, 0xa3, 0x52, 0x97, 0xec, 0x0e, 0xc4, 0xf5, 0x5d, 0x2b, 0xe9, 0xb9, 0xc9, 0xc5, 0x64, 0x56, 0xdd, 0x2d, 0xcc, 0x24, 0x61, 0x1b, 0xc8, 0x2e, 0xce, 0x58, 0x5d, 0x89, 0xaf, 0xce, 0xcf, 0xbc, 0x59, 0xf2, 0x98, 0x29, 0xd0, 0x9a, 0x56, 0xde, 0x82, 0xed, 0xd1, 0xa1, 0x54, 
0x2b, 0x6e, 0xf5, 0xd2, 0x0f, 0x52, 0xfc, 0x59, 0xfd, 0xd3, 0xe6, 0x53, 0x24, 0x44, 0x12, 0xd6, 0x05, 0x54, 0x36, 0x2f, 0xf1, 0xd0, 0x67, 0x5c, 0x25, 0x27, 0xd7, 0xcd, 0xfd, 0x68, 0xcb, 0x24, 0x5a, 0xcd, 0x19, 0x72, 0x77, 0x22, 0xc2, 0x8b, 0xfc, 0x33, 0xe8, 0xe9, 0xc4, 0xa2, 0x91, 0x33, 0x8a, 0xe9, 0x2b, 0xb0, 0x30, 0x36, 0x8d, 0xec, 0x12, 0xc6, 0x67, 0x40, 0x7a, 0xef, 0x36, 0xd0, 0x5b, 0x4b, 0x61, 0xeb, 0x89, 0xd8, 0x00, 0x4f, 0x42, 0xdf, 0x4f, 0xdc, 0xc5, 0x52, 0xf4, 0xd1, 0x25, 0xde, 0xd4, 0x4f, 0x0d, 0xb7, 0x54, 0xdf, 0x44, 0x4a, 0xd3, 0x9d, 0x32, 0xde, 0x8f, 0x47, 0x0a, 0x86, 0xa9, 0xdf, 0xc9, 0x43, 0x89, 0x73, 0x1a, 0xdd, 0xb1, 0x3a, 0x3c, 0x58, 0x05, 0xdb, 0x33, 0x34, 0xcd, 0x3d, 0xf5, 0xd3, 0xd1, 0x34, 0x16, 0x2f, 0x0f, 0xd1, 0x6b, 0x49, 0x86, 0x2a, 0x4e, 0xd0, 0x71, 0x59, 0xe2, 0x27, 0xbe, 0xce, 0x77, 0x66, 0x50, 0x25, 0x2a, 0x9e, 0x7b, 0x2f, 0x20, 0xe6, 0xa0, 0xa8, 0xf8, 0x2f, 0xd4, 0xe5, 0xbe, 0xb1, 0x94, 0x33, 0x39, 0xe2, 0x26, 0xcf, 0xa3, 0x35, 0x0b, 0xeb, 0xa7, 0xd9, 0x6a, 0x34, 0xfc, 0xe4, 0x1d, 0xd9, 0x0c, 0x38, 0x46, 0xda, 0x78, 0xdb, 0x02, 0x37, 0xbd, 0xc8, 0x38, 0xdb, 0x45, 0x34, 0xd1, 0xad, 0xb5, 0xdb, 0x35, 0x33, 0x37, 0x94, 0x72, 0xda, 0x68, 0x31, 0x2c, 0x7e, 0xfb, 0xd8, 0x5b, 0x2f, 0x21, 0x6a, 0x2c, 0xd7, 0xa7, 0x2d, 0xcf, 0x55, 0x66, 0xd8, 0x4e, 0x2b, 0xe7, 0x3e, 0x2e, 0xce, 0xd2, 0x32, 0x83, 0x31, 0xd9, 0xcf, 0xbc, 0x2e, 0x67, 0x2c, 0x84, 0xce, 0x95, 0x2e, 0xe1, 0x2b, 0x47, 0xd0, 0x35, 0x58, 0x85, 0x27, 0xb1, 0xa6, 0x8e, 0x2d, 0x5a, 0xe2, 0xcd, 0xb3, 0x84, 0x2e, 0x0c, 0xe2, 0x8a, 0xca, 0xc6, 0x2e, 0x41, 0xe7, 0x60, 0xd7, 0xb2, 0x2f, 0x53, 0xe6, 0x80, 0xd7, 0x1e, 0x31, 0xcb, 0xdb, 0xf6, 0xd9, 0x5c, 0x31, 0x7e, 0xd8, 0x58, 0xd9, 0x55, 0x30, 0x26, 0xc4, 0x5d, 0xd9, 0xb1, 0x2e, 0x8a, 0xac, 0x98, 0xd9, 0x60, 0x2d, 0x76, 0x93, 0x5a, 0xd7, 0x68, 0x2c, 0x53, 0x7e, 0xb7, 0xd5, 0xde, 0x2b, 0x91, 0x6a, 0xe7, 0xd6, 0x01, 0x2a, 0xfd, 0x59, 0xbc, 0xd7, 0x0e, 0x28, 0x78, 0x40, 0x83, 0xd1, 0x36, 0x2b, 0xaf, 0x2e, 0xc8, 0xcf, 0xbe, 0x2c, 0xb7, 0x2c, 0xfc, 
0xce, 0xd0, 0x2d, 0x63, 0x2b, 0xdf, 0xce, 0x2c, 0x2d, 0xde, 0x2b, 0x1d, 0x1e, 0x1f, 0xca, 0xd8, 0xdc, 0xe5, 0x1d, 0xc4, 0xd0, 0x7c, 0xdc, 0x4f, 0x1b, 0x2b, 0xdb, 0x10, 0xdd, 0x35, 0x1f, 0xa1, 0xc2, 0x0d, 0xba, 0xe8, 0x23, 0xd0, 0xc1, 0xa4, 0xb8, 0x02, 0x24, 0xce, 0xc2, 0x81, 0xb2, 0x0b, 0x27, 0x59, 0xbc, 0xb4, 0xa8, 0x6d, 0x28, 0x78, 0xbb, 0x71, 0xa2, 0xdf, 0x2c, 0xf2, 0xcd, 0x85, 0x9f, 0xcb, 0x2e, 0xbb, 0xcb, 0x91, 0x94, 0x55, 0x2e, 0x97, 0xcb, 0x47, 0x85, 0xa8, 0x2e, 0x05, 0xcb, 0xa6, 0x76, 0x43, 0x2a, 0x20, 0xcd, 0x21, 0x5d, 0x82, 0x25, 0xc6, 0xcd, 0xf9, 0x2e, 0x12, 0x26, 0xde, 0xcd, 0x8b, 0x2b, 0x08, 0x28, 0xb7, 0xcc, 0xcf, 0x29, 0x86, 0x2a, 0x03, 0xcc, 0x4c, 0x28, 0x83, 0x1f, 0xe9, 0xc6, 0xed, 0xdd, 0x53, 0x1e, 0x6d, 0xcb, 0x9b, 0xdd, 0xa9, 0x1e, 0x09, 0xd1, 0x49, 0xdc, 0xef, 0x1c, 0x25, 0xdb, 0xd6, 0xdc, 0xe7, 0x1a, 0x3f, 0xde, 0x1d, 0xd9, 0x0e, 0x1e, 0xa5, 0xcb, 0xca, 0xc1, 0xb4, 0x22, 0x41, 0xce, 0x93, 0xbb, 0x1b, 0x27, 0x13, 0xc3, 0xdd, 0xab, 0x5b, 0x2c, 0x68, 0xce, 0xbb, 0xa4, 0x85, 0x2e, 0x7d, 0xcb, 0xe6, 0x96, 0x09, 0x2e, 0x91, 0xcb, 0x45, 0x84, 0x58, 0x2c, 0xe4, 0xcc, 0x7f, 0x6d, 0x3c, 0x29, 0xeb, 0xcd, 0x7e, 0x51, 0x19, 0x26, 0x25, 0xce, 0x04, 0x2c, 0xf5, 0x28, 0x94, 0xcd, 0x02, 0x29, 0xb5, 0x2a, 0x7d, 0xcc, 0x39, 0x28, 0x34, 0x2b, 0xb7, 0xcb, 0xb9, 0x27, 0x46, 0x1d, 0xc6, 0xc3, 0xa8, 0xe2, 0x31, 0x20, 0x39, 0xc7, 0xe0, 0xde, 0xc6, 0x1e, 0xf8, 0xcc, 0xdc, 0xde, 0xfb, 0x1c, 0x68, 0xd6, 0x18, 0xdf, 0xce, 0x1c, 0x48, 0xdd, 0x8f, 0xdd, 0xd1, 0x1c, 0xb8, 0xdf, 0xe5, 0xd7, 0x44, 0x22, 0x93, 0xdc, 0x1c, 0xca, 0xfb, 0x27, 0xbf, 0xd7, 0x55, 0xbc, 0x61, 0x2d, 0x53, 0xd1, 0x96, 0xaa, 0x65, 0x2e, 0xb5, 0xcc, 0xb6, 0x98, 0x0e, 0x2e, 0x93, 0xcb, 0x51, 0x82, 0x49, 0x2b, 0x4c, 0xcd, 0x4a, 0x65, 0x27, 0x26, 0x60, 0xce, 0xce, 0x42, 0xfb, 0x1e, 0xb8, 0xcf, 0x5b, 0x21, 0x48, 0x2b, 0xbb, 0xcc, 0x07, 0x27, 0x6a, 0x42, 0xe5, 0xcc, 0xd4, 0x20, 0xc4, 0x4e, 0xff, 0xcc, 0x81, 0x21, 0xe5, 0x1d, 0x3b, 0xbd, 0x76, 0xe5, 0xf0, 0x1e, 0x92, 0xc2, 0x8f, 0xe4, 0x84, 0x21, 0x69, 0xc8, 
0xff, 0xe0, 0xaf, 0x20, 0xb9, 0xce, 0xc2, 0xe1, 0x09, 0x1d, 0xfb, 0xda, 0x42, 0xe2, 0x92, 0x19, 0xce, 0xe6, 0x66, 0xe5, 0xe2, 0x26, 0x3b, 0xdc, 0x9b, 0xd1, 0x94, 0x33, 0x28, 0xd4, 0x38, 0xbd, 0xdd, 0x3a, 0x1c, 0xcf, 0x3c, 0xab, 0xf4, 0x3d, 0xd5, 0xcb, 0xce, 0x97, 0xa8, 0x3e, 0x2c, 0xc9, 0xf3, 0x80, 0xfa, 0x3b, 0xbf, 0xc9, 0x27, 0x61, 0xe5, 0x37, 0x24, 0xca, 0x07, 0x3e, 0x3a, 0x3a, 0x90, 0xca, 0x4a, 0x21, 0xca, 0x4d, 0x81, 0xca, 0x7f, 0x21, 0x11, 0x5f, 0xb3, 0xcb, 0x54, 0x1e, 0x5e, 0x78, 0x36, 0xcb, 0x1b, 0x1b, 0x9f, 0x22, 0x16, 0xb4, 0x7f, 0xe4, 0xb0, 0x20, 0x65, 0xba, 0xde, 0xe7, 0x79, 0x22, 0x5f, 0xc1, 0x24, 0xe5, 0xa1, 0x24, 0x5f, 0xc8, 0x5e, 0xe4, 0x7e, 0x28, 0x1b, 0xd0, 0xe8, 0xe3, 0x61, 0x2e, 0x1f, 0xd6, 0xad, 0xdd, 0x06, 0x3d, 0x04, 0xcc, 0xeb, 0xc7, 0xc4, 0x46, 0xff, 0xc5, 0xe4, 0xb5, 0x3f, 0x50, 0x62, 0xbf, 0xc6, 0xa3, 0x40, 0x54, 0x8d, 0xbd, 0x00, 0x91, 0x0b, 0x56, 0xb5, 0xbb, 0x8a, 0x7a, 0x53, 0x57, 0xb8, 0xba, 0x50, 0x60, 0x03, 0x59, 0xcc, 0xbb, 0x9b, 0x42, 0x65, 0x5d, 0xb6, 0xbd, 0xbb, 0x2b, 0xe9, 0x68, 0xb2, 0xbf, 0xdc, 0x1a, 0x6a, 0x7b, 0x26, 0xc6, 0xec, 0x17, 0xe4, 0x89, 0x3f, 0xc8, 0xd9, 0x13, 0xea, 0x26, 0x7c, 0xaa, 0xdb, 0xe2, 0x83, 0x26, 0xdb, 0xb1, 0xd0, 0xe4, 0xa1, 0x28, 0xe7, 0xb6, 0x6a, 0xe6, 0x3c, 0x2b, 0xdb, 0xc0, 0x17, 0xe8, 0xb1, 0x35, 0x35, 0xc9, 0x5e, 0xe9, 0x38, 0x46, 0x4c, 0xc3, 0x2c, 0xd2, 0x35, 0x52, 0x32, 0xbc, 0x5c, 0xbd, 0x7c, 0x5c, 0x58, 0xb5, 0x48, 0xab, 0x93, 0x63, 0x1c, 0xb1, 0x57, 0x9a, 0x93, 0x67, 0x2c, 0xae, 0xfa, 0x89, 0x28, 0x6a, 0xd2, 0xad, 0xe1, 0x74, 0x01, 0x6e, 0x53, 0xad, 0xd3, 0x5e, 0x11, 0x73, 0xbf, 0xae, 0xcd, 0x48, 0x90, 0x79, 0x2a, 0xb1, 0xb8, 0x35, 0x96, 0x7e, 0x4e, 0xb4, 0xcb, 0x24, 0x23, 0x83, 0xd4, 0xba, 0x0a, 0x14, 0x0d, 0x93, 0x66, 0xc2, 0x5d, 0x15, 0x61, 0x29, 0x71, 0x9e, 0x74, 0xe2, 0x59, 0x2b, 0x19, 0xa4, 0xc2, 0xe3, 0x58, 0x2f, 0xc4, 0xaa, 0xc2, 0xe3, 0xa0, 0x38, 0x46, 0xb3, 0x49, 0xe7, 0x5c, 0x4d, 0xe2, 0xb5, 0x3f, 0xda, 0xc7, 0x58, 0xc0, 0xb2, 0xaf, 0xcb, 0xd9, 0x63, 0x58, 0xac, 0xc3, 0xb8, 0xbd, 
0x6c, 0x1f, 0xa9, 0x0b, 0xa6, 0x6f, 0x72, 0xf1, 0xa6, 0x14, 0x95, 0xd7, 0x78, 0x05, 0xa4, 0x5d, 0x84, 0x36, 0x7c, 0x35, 0xa3, 0xc5, 0x70, 0xb2, 0x80, 0x3e, 0xa4, 0x8c, 0x5e, 0x15, 0x84, 0x46, 0xa6, 0x05, 0x4b, 0xab, 0x87, 0xe1, 0xa9, 0x4e, 0x39, 0x64, 0x8c, 0xa9, 0xac, 0x39, 0x29, 0xf8, 0x91, 0x46, 0xb0, 0x2e, 0x19, 0xeb, 0x9d, 0x82, 0xbc, 0xef, 0x19, 0x86, 0x2b, 0xdf, 0x94, 0x50, 0xe2, 0xff, 0x2e, 0xe1, 0x99, 0xb3, 0xe3, 0x93, 0x34, 0x6b, 0x9e, 0xdc, 0xe4, 0xa5, 0x4b, 0xff, 0xa5, 0xfd, 0xe4, 0x8e, 0x5e, 0x1f, 0xa7, 0x53, 0xd9, 0xe5, 0x68, 0xb1, 0xa2, 0xc1, 0xc8, 0xae, 0x73, 0x20, 0xa0, 0xcc, 0xb8, 0x0e, 0x7b, 0xc9, 0x9e, 0xc3, 0xa5, 0xb7, 0x83, 0x07, 0x9d, 0x41, 0x94, 0x9e, 0x88, 0x18, 0x9c, 0x5b, 0x82, 0x49, 0x8b, 0xda, 0x9c, 0x37, 0x70, 0x42, 0x8e, 0xea, 0x9d, 0x5a, 0x5e, 0x76, 0x91, 0x72, 0x9f, 0x8f, 0x4e, 0x0f, 0x94, 0x44, 0xa2, 0x6d, 0x3d, 0x9c, 0x97, 0x71, 0xa5, 0x7c, 0x2d, 0xd9, 0x99, 0xa5, 0xa9, 0x90, 0x1f, 0x09, 0xa8, 0x7b, 0xb5, 0xc3, 0x1e, 0xb9, 0x2e, 0x65, 0x85, 0xd4, 0xe6, 0x2e, 0x33, 0x7e, 0x8c, 0x65, 0xe4, 0xe3, 0x49, 0x7e, 0x91, 0xc8, 0xe4, 0x16, 0x5d, 0xb2, 0x98, 0xaa, 0xe3, 0xb7, 0x6f, 0x5a, 0x9b, 0x79, 0xda, 0x88, 0x7a, 0xea, 0x99, 0x92, 0xca, 0x2f, 0x84, 0x8d, 0x96, 0xf1, 0xb8, 0x3a, 0x8c, 0xe1, 0x96, 0x3b, 0xa6, 0xc3, 0x94, 0x1b, 0x95, 0x27, 0x95, 0x5c, 0x97, 0x67, 0x95, 0x07, 0x82, 0xbd, 0x9a, 0x3d, 0x94, 0xd1, 0x70, 0xa9, 0x9c, 0x5d, 0x95, 0xac, 0x5f, 0x3f, 0x9e, 0x3e, 0x98, 0x79, 0x4f, 0x84, 0xa0, 0x64, 0x9b, 0xcf, 0x3f, 0xad, 0xa2, 0xa6, 0x9f, 0x19, 0x30, 0x9c, 0xa4, 0xe4, 0xa2, 0x0b, 0x21, 0x79, 0xb0, 0xd7, 0xaf, 0x30, 0x20, 0xee, 0x32, 0xbc, 0x75, 0x25, 0xe6, 0xc1, 0x48, 0xd7, 0x7e, 0xc8, 0xe6, 0xd3, 0x5b, 0x87, 0x84, 0x65, 0xe6, 0x14, 0x70, 0xad, 0x8b, 0x86, 0xe4, 0xae, 0x82, 0xb7, 0x91, 0x85, 0xdf, 0x8c, 0x8d, 0xf0, 0x90, 0x88, 0xcc, 0x5e, 0x97, 0x54, 0x8f, 0x32, 0xba, 0x74, 0x9d, 0x9b, 0x8e, 0xba, 0xa8, 0xb3, 0xa2, 0xcc, 0x8d, 0xdb, 0x96, 0x59, 0xa6, 0x27, 0x8c, 0xc2, 0x83, 0x40, 0xa8, 0x91, 0x8c, 0x43, 0x71, 0x0b, 0xaa, 0x5d, 0x8c, 
0xff, 0x60, 0x2b, 0xac, 0x11, 0x8f, 0x2c, 0x4f, 0xd6, 0xad, 0xc4, 0x92, 0x4c, 0x3f, 0xa3, 0xaf, 0x37, 0x95, 0x66, 0x30, 0xc0, 0xb1, 0x1a, 0x98, 0x84, 0x22, 0x11, 0xb9, 0x8e, 0xa3, 0x12, 0x21, 0x43, 0x46, 0x41, 0x6a, 0x10, 0xec, 0x29, 0x5a, 0x3a, 0x72, 0x0c, 0xea, 0x5c, 0x70, 0x83, 0x77, 0x72, 0xe8, 0xf6, 0x87, 0x35, 0x80, 0xbd, 0xe7, 0xc3, 0x98, 0x32, 0x88, 0x5b, 0xe3, 0xb5, 0xa1, 0x76, 0x88, 0x62, 0xcf, 0xe9, 0xa8, 0xac, 0x88, 0x21, 0xbe, 0x8f, 0xad, 0x48, 0x87, 0xb7, 0xac, 0x8c, 0xb1, 0x37, 0x85, 0x8f, 0x97, 0x91, 0xb3, 0xe1, 0x83, 0xd5, 0x84, 0x08, 0xb5, 0xfb, 0x83, 0x71, 0x71, 0xd7, 0xb7, 0x0b, 0x83, 0xef, 0x61, 0x15, 0xb8, 0x2b, 0x85, 0x57, 0x4f, 0xda, 0xb9, 0x74, 0x87, 0xbc, 0x3e, 0xa6, 0xba, 0xd1, 0x8a, 0x60, 0x2f, 0x63, 0xbb, 0xe3, 0x8c, 0x8b, 0x1f, 0x59, 0xc2, 0xf5, 0x96, 0x2f, 0x1e, 0x97, 0x2d, 0x17, 0x2e, 0xaa, 0xd2, 0x92, 0x72, 0x9d, 0x61, 0xa0, 0xef, 0x5a, 0x88, 0x8a, 0x6b, 0xa5, 0xec, 0xb0, 0x97, 0x91, 0x74, 0xd7, 0xeb, 0x74, 0xad, 0xa0, 0x80, 0x3a, 0xea, 0xef, 0xb3, 0x4b, 0x80, 0xf8, 0xd6, 0x91, 0xb8, 0x2b, 0x80, 0xb2, 0xc3, 0xc1, 0xbb, 0x43, 0x7f, 0x62, 0xaf, 0x2b, 0xbf, 0x13, 0x7c, 0x5b, 0x98, 0xb4, 0xc0, 0x54, 0x7a, 0x5c, 0x85, 0x38, 0xc1, 0x51, 0x79, 0x54, 0x73, 0x50, 0xc2, 0x48, 0x79, 0x9e, 0x61, 0xea, 0xc3, 0x43, 0x7a, 0xaa, 0x4f, 0x9b, 0xc4, 0x2b, 0x7c, 0xc2, 0x3c, 0xf8, 0xc5, 0x7d, 0x7e, 0xb4, 0x2b, 0x76, 0xc6, 0x10, 0x80, 0xc9, 0x1a, 0xd1, 0xc8, 0x3c, 0x8a, 0x99, 0x1b, 0x9e, 0x2d, 0x31, 0x2e, 0x99, 0xd2, 0x9b, 0x86, 0x19, 0x56, 0x95, 0xf3, 0xe1, 0x98, 0x97, 0x5f, 0xfc, 0xec, 0x34, 0xa9, 0x42, 0x66, 0x69, 0xe9, 0x79, 0xba, 0x4e, 0x70, 0xe4, 0xea, 0x9b, 0xc4, 0xa3, 0x78, 0xcb, 0xdf, 0xbf, 0xc6, 0xa4, 0x77, 0x64, 0xca, 0xc5, 0xc8, 0xaa, 0x75, 0x94, 0xb4, 0x33, 0xca, 0xcc, 0x71, 0xd7, 0x9d, 0xdd, 0xcb, 0xd7, 0x6f, 0x52, 0x88, 0xeb, 0xcc, 0xaf, 0x6d, 0x60, 0x75, 0x51, 0xcd, 0x69, 0x6c, 0x72, 0x61, 0xb3, 0xce, 0xa2, 0x6c, 0xc0, 0x4d, 0xd5, 0xcf, 0xe0, 0x6e, 0x98, 0x39, 0xaa, 0xd0, 0xc0, 0x71, 0xea, 0x26, 0xcb, 0xcd, 0x6a, 0x79, 0xf4, 0x22, 0xba, 
0xcc, 0x83, 0x83, 0x18, 0x20, 0xad, 0x8c, 0xa1, 0x3c, 0x5e, 0xf6, 0x48, 0x9a, 0x3f, 0x45, 0x2d, 0xf1, 0x4f, 0xa7, 0x19, 0x4d, 0xdc, 0xec, 0x6f, 0xb7, 0x9a, 0x5b, 0x47, 0xe9, 0x60, 0xc8, 0x06, 0x62, 0xb8, 0xe9, 0x84, 0xd3, 0x35, 0x6e, 0xfb, 0xe8, 0xa4, 0xd4, 0xc9, 0x6d, 0x0a, 0xd3, 0x43, 0xd7, 0x16, 0x69, 0x9c, 0xba, 0xa6, 0xd8, 0x44, 0x65, 0x9f, 0xa2, 0x19, 0xd8, 0xb1, 0x62, 0x4b, 0x8c, 0x07, 0xd8, 0x89, 0x5f, 0xe1, 0x77, 0x53, 0xd8, 0xf8, 0x5d, 0x7d, 0x62, 0x45, 0xda, 0x4c, 0x5d, 0xc5, 0x4b, 0x84, 0xdc, 0x8c, 0x5d, 0x6f, 0x36, 0xbd, 0xd4, 0x0b, 0x62, 0x83, 0x2a, 0x59, 0xd0, 0x0a, 0x6d, 0xef, 0x26, 0x0e, 0xce, 0xc4, 0x76, 0xef, 0x24, 0x16, 0x96, 0x13, 0x32, 0xf3, 0xed, 0x62, 0xa6, 0x35, 0x36, 0x76, 0xec, 0xff, 0xb6, 0x4d, 0x3b, 0x7a, 0xf0, 0x05, 0xc5, 0x34, 0x4a, 0xf6, 0xec, 0xc3, 0xd0, 0x52, 0x53, 0xab, 0xe8, 0x5a, 0xd5, 0xfd, 0x57, 0x7a, 0xdd, 0x5b, 0xdb, 0xb7, 0x59, 0xd5, 0xd3, 0x5e, 0xe0, 0x0b, 0x54, 0x62, 0xb9, 0x68, 0xde, 0xd2, 0x51, 0x2e, 0x9f, 0xa8, 0xe0, 0xf7, 0x4b, 0xcf, 0x88, 0xeb, 0xe2, 0x46, 0x47, 0x14, 0x74, 0x34, 0xe0, 0x52, 0x41, 0x36, 0x5b, 0xcf, 0xdd, 0x46, 0x3a, 0xbc, 0x41, 0x34, 0xd8, 0xa5, 0x41, 0x17, 0x31, 0x80, 0xd3, 0xfd, 0x4f, 0xe2, 0x2b, 0xd3, 0xd1, 0xaa, 0x5d, 0x8e, 0x28, 0xcc, 0xcf, 0xb8, 0x6a, 0xef, 0x26, 0x0a, 0xa2, 0x65, 0x30, 0xad, 0xe6, 0xa2, 0xa7, 0xa1, 0x34, 0x03, 0xdf, 0xe9, 0xb5, 0x56, 0x35, 0xb6, 0xe4, 0xfe, 0xd2, 0xc8, 0x38, 0x65, 0xee, 0x50, 0xd9, 0xf3, 0x37, 0xc1, 0xe2, 0x05, 0xd9, 0xf6, 0x42, 0xc1, 0xda, 0xdc, 0xdd, 0x7b, 0x44, 0x36, 0xce, 0xdd, 0xdc, 0xae, 0x3a, 0xcd, 0xb0, 0x56, 0xdc, 0x11, 0x36, 0x11, 0x95, 0x74, 0xdc, 0x77, 0x34, 0x84, 0x80, 0xd4, 0xda, 0x51, 0x32, 0x15, 0x6b, 0xe8, 0xd9, 0x7f, 0x30, 0xfb, 0x57, 0x52, 0xd9, 0x93, 0x2f, 0x29, 0x40, 0x3b, 0xd3, 0x72, 0x2f, 0x31, 0x30, 0x04, 0xd0, 0xdf, 0x2f, 0x8c, 0x2d, 0x5d, 0xd1, 0xb0, 0x4b, 0x17, 0x2a, 0x57, 0xd1, 0x62, 0x5a, 0xb0, 0x28, 0x78, 0xa9, 0x7f, 0x2f, 0x1c, 0xe3, 0xe2, 0xb5, 0x58, 0x2f, 0x92, 0xe4, 0x36, 0xc8, 0x21, 0x32, 0xed, 0xe3, 0x2c, 0xd5, 0x76, 0x32, 
0xd7, 0xdf, 0x44, 0xd7, 0x87, 0x33, 0xf9, 0xdb, 0xd0, 0xd9, 0x88, 0x33, 0xfd, 0xd8, 0xa3, 0xd9, 0xe0, 0x32, 0x7d, 0xc4, 0xc9, 0xda, 0x51, 0x31, 0x0b, 0xad, 0x4c, 0xda, 0x29, 0x2f, 0xfc, 0x94, 0x2d, 0xd8, 0xea, 0x2e, 0xa8, 0x7f, 0xec, 0xd7, 0x4a, 0x2d, 0xa7, 0x6c, 0x13, 0xd7, 0x73, 0x2d, 0x0f, 0x5a, 0xd1, 0xd8, 0x3b, 0x2a, 0xf3, 0x42, 0x39, 0xd2, 0x5f, 0x2c, 0xcf, 0x2f, 0x9e, 0xd0, 0xa2, 0x2d, 0x96, 0x2d, 0xa1, 0xcf, 0x8a, 0x2e, 0x1a, 0x2c, 0x64, 0xce, 0xc8, 0x2e, 0x7a, 0x2b, 0x8f, 0x1e, 0xd6, 0xce, 0x23, 0xde, 0xc8, 0x1e, 0x9e, 0xd2, 0x98, 0xde, 0x54, 0x1c, 0x9c, 0xdd, 0xf2, 0xdf, 0x18, 0x1c, 0x07, 0xdf, 0x57, 0xdd, 0x0d, 0x24, 0x2d, 0xc5, 0x47, 0xbb, 0x49, 0x20, 0x01, 0xcf, 0x01, 0xbe, 0x19, 0x23, 0xbb, 0xc5, 0x97, 0xb0, 0x40, 0x2b, 0x06, 0xd2, 0x2e, 0xae, 0x0b, 0x2d, 0xab, 0xcd, 0xf9, 0x9f, 0xcd, 0x2f, 0x07, 0xcb, 0xb7, 0x94, 0x00, 0x2e, 0xa3, 0xcb, 0x54, 0x85, 0xac, 0x2e, 0x3c, 0xcb, 0xd6, 0x75, 0xbb, 0x2b, 0x36, 0xcd, 0xad, 0x5e, 0x17, 0x28, 0x04, 0xcf, 0x16, 0x2f, 0x1c, 0x28, 0x5c, 0xce, 0xdf, 0x2c, 0x18, 0x29, 0x45, 0xce, 0x52, 0x29, 0xf5, 0x2a, 0x7c, 0xcd, 0x93, 0x28, 0xe0, 0x20, 0x7c, 0xc9, 0x05, 0xdf, 0x6a, 0x1f, 0x5b, 0xcf, 0x57, 0xe0, 0x08, 0x1c, 0xe0, 0xd7, 0xc0, 0xe1, 0x6d, 0x1c, 0xe0, 0xdf, 0xd0, 0xe0, 0x78, 0x1d, 0x37, 0xe1, 0x62, 0xdb, 0xc8, 0x22, 0xf0, 0xdd, 0x9a, 0xd0, 0xff, 0x24, 0x8c, 0xdb, 0x7a, 0xc6, 0x80, 0x2a, 0x7f, 0xd5, 0x35, 0xb5, 0x5c, 0x2d, 0xad, 0xcf, 0x9b, 0xa4, 0xd3, 0x2e, 0xeb, 0xcc, 0x2f, 0x95, 0xcc, 0x2e, 0x9e, 0xcb, 0x54, 0x83, 0xc4, 0x2d, 0x94, 0xcc, 0xed, 0x6d, 0x25, 0x2c, 0x6a, 0xce, 0xd2, 0x52, 0x36, 0x29, 0x36, 0xcf, 0x8d, 0x2e, 0xa2, 0x29, 0xa6, 0xcf, 0x3b, 0x2a, 0x94, 0x2b, 0x2c, 0xce, 0x12, 0x28, 0xbc, 0x2c, 0x48, 0xcd, 0x3b, 0x27, 0xb4, 0x1f, 0xa1, 0xc6, 0x78, 0xe3, 0x43, 0x21, 0xa2, 0xc9, 0xcb, 0xe0, 0x87, 0x20, 0xfb, 0xd0, 0x65, 0xe1, 0x1c, 0x1e, 0x8f, 0xda, 0x96, 0xe2, 0xc4, 0x1e, 0x82, 0xe2, 0x5e, 0xe2, 0x09, 0x22, 0x4a, 0xe1, 0x92, 0xd8, 0x4e, 0x27, 0xb3, 0xde, 0x5c, 0xcc, 0x79, 0x2b, 0xa4, 0xd9, 0xe7, 0xbe, 0x53, 
0x2f, 0xa5, 0xd2, 0xe6, 0xab, 0x36, 0x2f, 0x42, 0xcc, 0xdc, 0x97, 0xb5, 0x2e, 0xb3, 0xcb, 0x45, 0x81, 0xa2, 0x2d, 0x5b, 0xce, 0x5c, 0x65, 0x9f, 0x2b, 0xb7, 0xd2, 0x11, 0x41, 0xa8, 0x2c, 0x0b, 0xd0, 0x9c, 0x2d, 0x96, 0x2c, 0xfc, 0xcf, 0x5b, 0x28, 0x5f, 0x49, 0xd0, 0xd0, 0x5b, 0x22, 0x0b, 0x53, 0x44, 0xcf, 0x5d, 0x20, 0x25, 0x20, 0x3f, 0xbf, 0x52, 0xe5, 0xe4, 0x21, 0x84, 0xc4, 0xbe, 0xe4, 0xbc, 0x23, 0xfe, 0xca, 0x83, 0xe1, 0xb9, 0x24, 0x85, 0xd1, 0x86, 0xe2, 0x42, 0x23, 0x36, 0xdd, 0x98, 0xe4, 0x48, 0x28, 0x4c, 0xe3, 0x48, 0xe1, 0x21, 0x2e, 0xa4, 0xe1, 0xa4, 0xd4, 0xc5, 0x37, 0xf1, 0xe3, 0x65, 0xcb, 0x4d, 0x3f, 0xbe, 0xdf, 0x22, 0xb8, 0xa6, 0x44, 0xf2, 0xdb, 0x50, 0xa2, 0xa0, 0x45, 0x2f, 0xda, 0x5b, 0x8a, 0x2b, 0x43, 0x84, 0xd9, 0x99, 0x69, 0x3b, 0x3f, 0x18, 0xdb, 0x12, 0x43, 0xd5, 0x44, 0x21, 0xd7, 0x4a, 0x29, 0x6a, 0x53, 0x19, 0xd1, 0xa7, 0x21, 0x42, 0x70, 0x95, 0xd1, 0x0b, 0x1e, 0x80, 0x7a, 0xb1, 0xce, 0x6f, 0x1d, 0x97, 0x24, 0x8c, 0xb5, 0x74, 0xe5, 0x57, 0x23, 0xab, 0xbd, 0x17, 0xe7, 0xf1, 0x25, 0x70, 0xc3, 0x6f, 0xe6, 0xbe, 0x28, 0x39, 0xca, 0xea, 0xe5, 0x16, 0x2f, 0x96, 0xd3, 0x10, 0xe3, 0x4d, 0x3a, 0xe8, 0xdf, 0xe9, 0xe5, 0x24, 0x47, 0x3a, 0xda, 0x4e, 0xd4, 0x2f, 0x50, 0xe4, 0xd3, 0x24, 0xc1, 0xfc, 0x57, 0xdf, 0xce, 0x1f, 0xaf, 0x00, 0x5c, 0xeb, 0xcb, 0x00, 0x9b, 0xb2, 0x5e, 0x7a, 0xca, 0x0b, 0x83, 0xda, 0x5f, 0xe3, 0xc9, 0xa4, 0x69, 0x89, 0x61, 0xb3, 0xca, 0xc3, 0x4c, 0x7e, 0x69, 0xb1, 0xcc, 0xe0, 0x31, 0xa3, 0x72, 0xf6, 0xce, 0x6f, 0x1d, 0xdb, 0x7b, 0x2d, 0xcb, 0xfa, 0x1b, 0xa4, 0x90, 0x6a, 0xcb, 0xc6, 0x17, 0x52, 0x28, 0x83, 0xad, 0x5d, 0xe3, 0x82, 0x29, 0x58, 0xb4, 0x7f, 0xe5, 0xc8, 0x2b, 0x19, 0xb9, 0x7b, 0xe8, 0x0d, 0x2f, 0xd8, 0xc2, 0x24, 0xe8, 0xd7, 0x3f, 0x5a, 0xcb, 0xee, 0xe6, 0xd9, 0x52, 0xfc, 0xcd, 0x42, 0xd9, 0xb9, 0x5b, 0x91, 0xc9, 0x4f, 0xca, 0x7a, 0x65, 0x94, 0xc2, 0xca, 0xb8, 0x6f, 0x6c, 0x71, 0xbf, 0x5d, 0xa7, 0x39, 0x71, 0x43, 0xbc, 0x66, 0x94, 0x8b, 0x74, 0xa8, 0xbb, 0xca, 0x7f, 0x5a, 0x78, 0x49, 0xbb, 0xdd, 0x68, 0x6e, 0x7c, 0xd1, 0xbc, 
0xb2, 0x50, 0xc0, 0x82, 0x1d, 0xbf, 0x14, 0x3a, 0x4a, 0x87, 0x35, 0xc2, 0x07, 0x27, 0x90, 0x89, 0x61, 0xc2, 0x81, 0x15, 0xa5, 0x96, 0x9c, 0xc7, 0x67, 0x12, 0x8b, 0x2b, 0x8d, 0xa0, 0xb9, 0xe2, 0xe6, 0x2d, 0x96, 0xa7, 0x1d, 0xe4, 0x03, 0x31, 0xd4, 0xac, 0xba, 0xe4, 0xdc, 0x40, 0x47, 0xb6, 0xbc, 0xe8, 0x83, 0x57, 0x49, 0xbf, 0x40, 0xe3, 0x9c, 0x63, 0x0b, 0xbc, 0xef, 0xd4, 0x0a, 0x6d, 0x58, 0xb8, 0x9d, 0xc3, 0xc5, 0x76, 0x37, 0xb5, 0x12, 0xb2, 0x5f, 0x7d, 0x81, 0xb2, 0xa8, 0xa1, 0xbc, 0x82, 0x7d, 0xb0, 0xc2, 0x8f, 0xfd, 0x86, 0x9c, 0xaf, 0xa9, 0x7b, 0x8e, 0x8a, 0xa8, 0xb0, 0x1d, 0x67, 0xc1, 0x8d, 0xf3, 0xb1, 0x75, 0x54, 0x05, 0x90, 0xfe, 0xb4, 0x53, 0x40, 0xce, 0x93, 0x98, 0xb7, 0x77, 0x2e, 0x91, 0x97, 0xc2, 0xb9, 0xa1, 0x1c, 0x77, 0xa1, 0x3d, 0xc2, 0x1e, 0x19, 0x80, 0x2d, 0xf3, 0x96, 0xd6, 0xe3, 0x54, 0x31, 0x34, 0x9c, 0x39, 0xe4, 0x01, 0x3c, 0xbd, 0xa0, 0x4f, 0xe4, 0x81, 0x54, 0x5c, 0xa9, 0x27, 0xe5, 0x05, 0x67, 0x34, 0xaf, 0x61, 0xe1, 0xee, 0x73, 0x9e, 0xaf, 0x02, 0xd2, 0xc6, 0x7d, 0x32, 0xab, 0x49, 0xc2, 0x67, 0x86, 0x07, 0xa9, 0xa3, 0xb0, 0xfe, 0x8d, 0x93, 0xa8, 0x5a, 0x9f, 0xa5, 0x92, 0x9d, 0xa7, 0x4c, 0x8c, 0xfb, 0x96, 0x2b, 0xa6, 0xed, 0x7a, 0x2a, 0x98, 0xd6, 0xa7, 0x8d, 0x67, 0x8f, 0x9b, 0x03, 0xa9, 0x4a, 0x55, 0xd6, 0x9d, 0x45, 0xac, 0x22, 0x44, 0xfa, 0x9f, 0xef, 0xaf, 0x26, 0x34, 0x7e, 0xa2, 0x4d, 0xb2, 0x81, 0x24, 0xed, 0xac, 0x37, 0xbb, 0x0a, 0x1e, 0xbd, 0x30, 0xa4, 0x88, 0xac, 0xe6, 0x29, 0x3c, 0x77, 0x8f, 0x61, 0xe5, 0x44, 0x54, 0xa0, 0x93, 0x91, 0xe2, 0x68, 0x63, 0xc3, 0x9b, 0xc5, 0xe4, 0x7e, 0x78, 0xde, 0xa4, 0x7d, 0xe3, 0x1d, 0x85, 0x04, 0xa3, 0x18, 0xd3, 0x17, 0x8e, 0xf1, 0xa1, 0x3f, 0xc2, 0x7f, 0x97, 0xa0, 0xa0, 0xaa, 0xb1, 0x83, 0x9e, 0x7d, 0x9f, 0x8b, 0x9f, 0xc4, 0xa1, 0xc3, 0x9f, 0x85, 0x8c, 0xe3, 0xa4, 0x26, 0x9e, 0xc9, 0x7a, 0x5a, 0xa5, 0xd6, 0x9f, 0x5a, 0x68, 0x05, 0xa7, 0x7e, 0xa1, 0x3d, 0x57, 0x2f, 0xa8, 0xfc, 0xa4, 0x67, 0x47, 0x40, 0xab, 0x08, 0xa7, 0x7a, 0x37, 0x3b, 0xad, 0x69, 0xaa, 0x6a, 0x27, 0xe0, 0xb6, 0x05, 0xb4, 0x55, 0x21, 0xd2, 
0x37, 0x73, 0x77, 0x9b, 0xe3, 0xad, 0x53, 0xd3, 0x80, 0x60, 0xe5, 0xb4, 0x63, 0x51, 0x86, 0x65, 0xe3, 0x24, 0x76, 0xe8, 0x8f, 0x33, 0xe5, 0x6c, 0x8d, 0x0e, 0x99, 0x5b, 0xe5, 0xa0, 0x99, 0x00, 0x99, 0xee, 0xd5, 0x3d, 0xa1, 0x61, 0x98, 0xd6, 0xc4, 0x64, 0xa8, 0x47, 0x98, 0xb0, 0xb3, 0x47, 0xad, 0x78, 0x98, 0x17, 0xa0, 0xce, 0xb0, 0x02, 0x96, 0xcf, 0x8d, 0x2a, 0xb1, 0xf3, 0x95, 0xfc, 0x7a, 0x86, 0xb3, 0x3a, 0x96, 0x1e, 0x68, 0xe0, 0xb4, 0x46, 0x97, 0xb1, 0x57, 0xac, 0xb5, 0xb1, 0x99, 0xf9, 0x47, 0x05, 0xb7, 0x1e, 0x9c, 0x91, 0x36, 0xf2, 0xb8, 0xc0, 0x9f, 0xa8, 0x27, 0xed, 0xbf, 0x2e, 0xa9, 0x37, 0x21, 0x88, 0x54, 0x03, 0x70, 0x02, 0xea, 0x75, 0x63, 0xe0, 0x74, 0xb1, 0xe8, 0x55, 0x78, 0x64, 0x7a, 0x34, 0xe9, 0x2c, 0x8a, 0x2c, 0x83, 0xf8, 0xe7, 0x1d, 0x9e, 0xe0, 0x8e, 0xe9, 0xe6, 0xd3, 0xac, 0xe9, 0x92, 0x6d, 0xd9, 0x5a, 0xb3, 0x4b, 0x92, 0x2e, 0xc9, 0x09, 0xb8, 0x17, 0x92, 0x14, 0xb7, 0x78, 0xbb, 0x0e, 0x8f, 0xf8, 0xa2, 0x20, 0xbd, 0x61, 0x8e, 0x1c, 0x8d, 0xbf, 0xbe, 0x73, 0x8d, 0x04, 0x7b, 0x46, 0xbf, 0x75, 0x8c, 0x9a, 0x69, 0xb1, 0xbf, 0xfc, 0x8d, 0x99, 0x58, 0x33, 0xc0, 0xbb, 0x8e, 0xde, 0x46, 0x8d, 0xc1, 0xa8, 0x90, 0xe9, 0x35, 0x0d, 0xc2, 0xcd, 0x93, 0x77, 0x24, 0xc9, 0xc5, 0xd3, 0x99, 0xea, 0x1e, 0xba, 0x6a, 0x58, 0x61, 0x5c, 0xef, 0x43, 0x7b, 0x6a, 0x67, 0x6a, 0xee, 0xb7, 0x8a, 0xf4, 0x70, 0x18, 0xed, 0xd6, 0x9c, 0x94, 0x78, 0x7b, 0xeb, 0x8d, 0xb1, 0xf1, 0x85, 0x1c, 0xea, 0xe0, 0xbe, 0x7e, 0x8b, 0xb0, 0xdf, 0x3e, 0xc2, 0x24, 0x8a, 0xe1, 0xce, 0xb6, 0xc4, 0xe0, 0x89, 0x9f, 0xba, 0x43, 0xc7, 0x38, 0x86, 0xd3, 0xa3, 0x99, 0xc8, 0x73, 0x84, 0xaa, 0x8f, 0x02, 0xc8, 0xdb, 0x83, 0x64, 0x7c, 0x63, 0xc9, 0x53, 0x82, 0xb0, 0x6a, 0x56, 0xca, 0x28, 0x82, 0xc5, 0x57, 0x62, 0xcb, 0x10, 0x83, 0xbd, 0x44, 0x20, 0xcb, 0xda, 0x85, 0x8d, 0x31, 0x75, 0xcc, 0xb2, 0x87, 0x8b, 0x20, 0xc6, 0xcc, 0x0d, 0x8e, 0xd6, 0x1d, 0xec, 0x76, 0xb3, 0x56, 0xe0, 0xf3, 0x17, 0x8a, 0xb2, 0x5d, 0xf3, 0xf1, 0x2e, 0x9c, 0xe6, 0x62, 0xd6, 0xeb, 0xb5, 0xad, 0x3d, 0x6b, 0x98, 0xe9, 0xb0, 0xbc, 0xcd, 0x77, 
0x3d, 0xe8, 0x92, 0xcf, 0x3e, 0x83, 0x1e, 0xea, 0xd4, 0xd0, 0x13, 0x82, 0xa6, 0xd5, 0x28, 0xd2, 0x37, 0x80, 0x18, 0xbf, 0x56, 0xd3, 0xac, 0x7d, 0x0e, 0xa8, 0x02, 0xd3, 0xfa, 0x7a, 0x5e, 0x91, 0xe0, 0xd4, 0x2b, 0x78, 0x4f, 0x7e, 0x20, 0xd4, 0x13, 0x76, 0xb7, 0x6a, 0x67, 0xd4, 0xc1, 0x76, 0x70, 0x55, 0xcb, 0xd5, 0x8e, 0x77, 0x26, 0x40, 0x6e, 0xd6, 0xe7, 0x79, 0x4c, 0x2c, 0xe0, 0xd0, 0x9b, 0x7e, 0xf9, 0x25, 0xa5, 0xce, 0xc6, 0x87, 0x3d, 0x22, 0x92, 0x8e, 0x2c, 0x44, 0x64, 0xf4, 0xc5, 0x9f, 0x0b, 0x4a, 0xfc, 0xef, 0x82, 0xab, 0xbb, 0x54, 0x1b, 0xeb, 0x26, 0xba, 0x53, 0x61, 0x46, 0xe8, 0xd7, 0xc9, 0xda, 0x68, 0x23, 0xe8, 0xf1, 0xd6, 0xa8, 0x70, 0xeb, 0xe6, 0xfe, 0xd9, 0x46, 0x72, 0x47, 0xd7, 0xdc, 0xdc, 0xe5, 0x70, 0xe8, 0xc1, 0xae, 0xde, 0x0e, 0x6c, 0xb7, 0xa9, 0x5d, 0xdd, 0xd8, 0x69, 0x80, 0x91, 0xe4, 0xdd, 0x2a, 0x66, 0xb6, 0x7d, 0x1d, 0xda, 0xb6, 0x64, 0x9c, 0x66, 0xba, 0xde, 0xb1, 0x60, 0x42, 0x4f, 0x4b, 0xdf, 0x07, 0x63, 0x92, 0x39, 0xe7, 0xd5, 0x7d, 0x69, 0xf0, 0x2c, 0xa1, 0xd1, 0xba, 0x72, 0xa7, 0x28, 0x0d, 0xd0, 0x06, 0x7c, 0x45, 0x25, 0x5b, 0x98, 0xa5, 0x34, 0xe0, 0xef, 0x62, 0xac, 0x87, 0x39, 0xb6, 0xef, 0xd2, 0xbc, 0xba, 0x45, 0xf3, 0xee, 0x22, 0xc8, 0x17, 0x4e, 0x5f, 0xec, 0x6f, 0xd2, 0xcb, 0x5a, 0x5e, 0xe6, 0xb9, 0xd7, 0xab, 0x5d, 0xf5, 0xdd, 0xe2, 0xda, 0xd3, 0x5e, 0xd7, 0xd4, 0xf3, 0xdd, 0xba, 0x5b, 0xf3, 0xbb, 0xe3, 0xde, 0x99, 0x56, 0xab, 0xa1, 0x63, 0xde, 0x4b, 0x53, 0x52, 0x8b, 0x80, 0xde, 0xa4, 0x4f, 0xfc, 0x77, 0x00, 0xe0, 0xbe, 0x4a, 0x02, 0x5f, 0xf2, 0xdf, 0x6a, 0x45, 0x68, 0x46, 0x70, 0xdc, 0x34, 0x48, 0xf7, 0x34, 0x2b, 0xd6, 0x69, 0x59, 0x3c, 0x2d, 0x46, 0xd3, 0x99, 0x63, 0x05, 0x29, 0xe0, 0xd1, 0x03, 0x6f, 0x6d, 0x26, 0xfa, 0xa6, 0x9f, 0x32, 0x19, 0xe7, 0x51, 0xb1, 0xf5, 0x34, 0x07, 0xe9, 0x36, 0xc3, 0x03, 0x39, 0x2b, 0xed, 0xb5, 0xd3, 0xd4, 0x3e, 0x00, 0xee, 0x7e, 0xd8, 0x1b, 0x44, 0xa2, 0xde, 0x3b, 0xda, 0x7d, 0x48, 0x9c, 0xdb, 0x00, 0xde, 0xaa, 0x4a, 0x8a, 0xd1, 0x06, 0xde, 0xa2, 0x44, 0x3c, 0xb4, 0x44, 0xdd, 0xd9, 0x3c, 0x21, 0x98, 0x09, 
0xdd, 0x66, 0x37, 0x35, 0x81, 0xdd, 0xdb, 0xea, 0x34, 0x86, 0x6d, 0x4d, 0xdb, 0x06, 0x33, 0xad, 0x58, 0xea, 0xda, 0x99, 0x31, 0xd2, 0x41, 0xf4, 0xd5, 0x09, 0x30, 0xd3, 0x31, 0x36, 0xd2, 0x00, 0x30, 0xb1, 0x2e, 0x34, 0xd3, 0x63, 0x55, 0x9e, 0x2a, 0xe8, 0xd2, 0x1a, 0x5e, 0x89, 0x29, 0x10, 0xad, 0x7c, 0x30, 0x63, 0xe5, 0x3b, 0xb7, 0x2e, 0x31, 0x19, 0xe5, 0xe2, 0xcf, 0x42, 0x33, 0xdb, 0xea, 0x8e, 0xd6, 0x5a, 0x35, 0x83, 0xdd, 0xf5, 0xd7, 0xda, 0x35, 0xb6, 0xdb, 0xb1, 0xd9, 0xaa, 0x35, 0xeb, 0xd8, 0xdd, 0xda, 0x50, 0x34, 0x5b, 0xc5, 0x04, 0xda, 0xd1, 0x33, 0x0f, 0xad, 0xc4, 0xda, 0xc9, 0x32, 0x05, 0x94, 0xc4, 0xda, 0x3a, 0x30, 0xb9, 0x80, 0xef, 0xd8, 0x89, 0x2f, 0x81, 0x6d, 0x14, 0xd7, 0xe8, 0x2e, 0x87, 0x59, 0xa6, 0xd9, 0x20, 0x2d, 0x2c, 0x43, 0xbb, 0xd3, 0x85, 0x2d, 0xf1, 0x30, 0x74, 0xd1, 0x86, 0x2e, 0x75, 0x2e, 0x45, 0xd0, 0x41, 0x2e, 0xd2, 0x2c, 0xed, 0xcf, 0x63, 0x2f, 0x15, 0x2c, 0x03, 0x1f, 0x9a, 0xd1, 0x6e, 0xe0, 0x9e, 0x1d, 0x1e, 0xd8, 0xaf, 0xe2, 0x46, 0x1d, 0x4a, 0xe0, 0xb1, 0xe1, 0x8b, 0x1d, 0x52, 0xe2, 0x6c, 0xdf, 0xc3, 0x22, 0xe4, 0xde, 0x41, 0xd3, 0xe1, 0x24, 0xe4, 0xdc, 0xde, 0xca, 0x44, 0x24, 0x37, 0xcf, 0xde, 0xba, 0x1a, 0x2c, 0x31, 0xd2, 0xd9, 0xae, 0x68, 0x2e, 0x32, 0xce, 0x45, 0x9f, 0xb3, 0x2f, 0x40, 0xcb, 0xc6, 0x93, 0x9f, 0x2e, 0xa8, 0xcb, 0x5a, 0x85, 0x3a, 0x2e, 0x69, 0xcb, 0xfe, 0x75, 0x2b, 0x2c, 0x00, 0xce, 0x12, 0x5e, 0x83, 0x29, 0xb7, 0xcf, 0xeb, 0x2f, 0xe5, 0x29, 0xf6, 0xcf, 0xb2, 0x2d, 0x37, 0x2a, 0x35, 0xcf, 0x7d, 0x2a, 0xc8, 0x2a, 0xf5, 0xce, 0xda, 0x29, 0x3e, 0x21, 0xbf, 0xca, 0x35, 0xe0, 0x74, 0x21, 0x1f, 0xd1, 0xfc, 0xe1, 0x21, 0x1e, 0xc5, 0xda, 0xcb, 0xe2, 0xd6, 0x1e, 0xf8, 0xe1, 0xe9, 0xe2, 0x33, 0x21, 0x32, 0xe2, 0x33, 0xdc, 0x30, 0x25, 0xc0, 0xde, 0xca, 0xd1, 0xd4, 0x26, 0xd2, 0xdc, 0xf6, 0xc7, 0x9a, 0x2c, 0x31, 0xd6, 0x3a, 0xb5, 0xfd, 0x2e, 0x7e, 0xd0, 0x1b, 0xa4, 0xe2, 0x2f, 0x31, 0xcc, 0x4c, 0x95, 0x74, 0x2e, 0xa4, 0xcb, 0x5e, 0x83, 0x2b, 0x2e, 0x07, 0xcd, 0x39, 0x6c, 0xec, 0x2e, 0x0e, 0xcf, 0xb2, 0x52, 0xda, 0x2b, 0x4b, 0xd0, 
0x94, 0x2f, 0xc4, 0x2b, 0x91, 0xd0, 0x3f, 0x2c, 0x4b, 0x2b, 0xdd, 0xcf, 0xea, 0x29, 0x43, 0x2c, 0xda, 0xce, 0xbd, 0x28, 0x24, 0x21, 0xc8, 0xc8, 0x8f, 0xe3, 0x5d, 0x23, 0x6d, 0xca, 0xcf, 0xe1, 0x47, 0x24, 0x8c, 0xd2, 0x61, 0xe1, 0x2c, 0x21, 0x84, 0xdd, 0x40, 0xe3, 0xc4, 0x22, 0x0d, 0xe4, 0x0f, 0xe3, 0x5e, 0x26, 0x03, 0xe2, 0x90, 0xd8, 0xd3, 0x2a, 0xcb, 0xdf, 0x9d, 0xcd, 0x44, 0x2d, 0xdf, 0xdb, 0x4f, 0xbf, 0x6c, 0x30, 0xba, 0xd3, 0x75, 0xab, 0x7e, 0x30, 0x15, 0xcc, 0x55, 0x96, 0x8a, 0x2e, 0xa8, 0xc9, 0xc5, 0x80, 0x5b, 0x2e, 0xd2, 0xcd, 0xf4, 0x65, 0x93, 0x2e, 0xa2, 0xd3, 0x91, 0x42, 0x44, 0x2e, 0x7b, 0xd1, 0xdf, 0x2f, 0x8b, 0x2e, 0xb2, 0xd1, 0x49, 0x2a, 0x98, 0x4e, 0x9a, 0xd4, 0x1d, 0x22, 0xa7, 0x57, 0xbf, 0xd2, 0x79, 0x20, 0xb0, 0x22, 0xcd, 0xc1, 0x1a, 0xe5, 0xc7, 0x23, 0xf4, 0xc6, 0xde, 0xe4, 0xe0, 0x26, 0x21, 0xcb, 0xbb, 0xe2, 0x8a, 0x29, 0x54, 0xd3, 0x64, 0xe2, 0x04, 0x27, 0x0b, 0xe0, 0x63, 0xe5, 0x75, 0x2d, 0x8d, 0xe3, 0x63, 0xe0, 0xd5, 0x31, 0x82, 0xe2, 0x62, 0xd5, 0x2f, 0x3b, 0x5c, 0xe4, 0x4d, 0xcc, 0x1a, 0x44, 0x45, 0xe0, 0xe1, 0xba, 0x2d, 0x4a, 0xba, 0xdc, 0x16, 0xa3, 0xef, 0x49, 0x9f, 0xdc, 0x88, 0x8b, 0x46, 0x48, 0x7d, 0xdb, 0xc8, 0x6a, 0xa9, 0x45, 0x0f, 0xdd, 0x53, 0x45, 0xe0, 0x4b, 0x26, 0xda, 0x7b, 0x2e, 0x8c, 0x59, 0x69, 0xd9, 0x3c, 0x23, 0x95, 0x75, 0x03, 0xd3, 0x6d, 0x22, 0x64, 0x7d, 0x2f, 0xd1, 0xbf, 0x1f, 0x90, 0x26, 0xb9, 0xb6, 0x4b, 0xe5, 0xe6, 0x26, 0x86, 0xbe, 0xeb, 0xe7, 0xda, 0x28, 0x2c, 0xc5, 0x8f, 0xe6, 0xdd, 0x2b, 0x38, 0xcd, 0x49, 0xe4, 0xac, 0x33, 0x70, 0xd5, 0x59, 0xe3, 0xb3, 0x40, 0x70, 0xe0, 0x0d, 0xe3, 0x54, 0x4d, 0xcd, 0xe3, 0x5a, 0xdc, 0xb9, 0x5a, 0x77, 0xe0, 0xca, 0xce, 0xf2, 0x64, 0xbf, 0xd7, 0xc4, 0xb9, 0xc0, 0x68, 0xba, 0xd5, 0xda, 0xa5, 0x87, 0x6b, 0x57, 0xd4, 0xce, 0x8e, 0xde, 0x6e, 0x00, 0xd4, 0xb4, 0x75, 0x4a, 0x71, 0x39, 0xd5, 0x2f, 0x5a, 0x68, 0x74, 0x41, 0xda, 0xed, 0x37, 0x8c, 0x79, 0x21, 0xd8, 0xdd, 0x25, 0xa6, 0x83, 0x42, 0xd2, 0x16, 0x1f, 0x87, 0x95, 0x15, 0xcf, 0x3e, 0x1a, 0x5a, 0x2a, 0x43, 0xaf, 0xd5, 0xe4, 0x79, 
0x2b, 0xd5, 0xb5, 0xad, 0xe6, 0x61, 0x2d, 0xcc, 0xbc, 0x4f, 0xe9, 0x5a, 0x32, 0x90, 0xc3, 0xe8, 0xe8, 0xd7, 0x4b, 0xab, 0xcf, 0x55, 0xe6, 0xa0, 0x5d, 0xd2, 0xd8, 0xa0, 0xe3, 0xf3, 0x67, 0x3f, 0xd4, 0xd1, 0xd4, 0x30, 0x6f, 0x5c, 0xcf, 0x79, 0xc4, 0x2b, 0x76, 0x3f, 0xcb, 0xa8, 0xb2, 0xf0, 0x7b, 0xa5, 0xc9, 0x01, 0x9f, 0x6c, 0x7f, 0x69, 0xc7, 0xcb, 0x8a, 0xd2, 0x83, 0x53, 0xc7, 0x74, 0x73, 0xe8, 0x86, 0xb5, 0xc7, 0xfe, 0x5b, 0x7c, 0x89, 0x4c, 0xca, 0x41, 0x43, 0x02, 0x8f, 0x1b, 0xcb, 0x89, 0x2b, 0xc8, 0x92, 0xf0, 0xcc, 0xbe, 0x17, 0xab, 0x9b, 0xd2, 0xcc, 0x37, 0x16, 0xe7, 0x2d, 0x5f, 0xa2, 0xf7, 0xe3, 0x6b, 0x30, 0x32, 0xa9, 0x1a, 0xe3, 0xff, 0x34, 0xcb, 0xae, 0xc5, 0xe4, 0x0d, 0x4c, 0xe1, 0xb8, 0xca, 0xe7, 0x77, 0x5d, 0x90, 0xc3, 0x56, 0xe5, 0x1d, 0x6e, 0x35, 0xc8, 0xe9, 0xde, 0x1a, 0x77, 0x78, 0xc5, 0x8d, 0xce, 0xf6, 0x80, 0x4b, 0xc1, 0xc5, 0xbe, 0x55, 0x87, 0xca, 0xbf, 0x1c, 0xad, 0xd1, 0x8c, 0xe3, 0xbd, 0x2a, 0x9b, 0x1a, 0x91, 0x1d, 0xbb, 0xf1, 0x86, 0xea, 0x95, 0x00, 0xbb, 0x95, 0x72, 0x20, 0x97, 0xba, 0xbc, 0x57, 0x5d, 0x79, 0x99, 0xee, 0xbe, 0x94, 0x49, 0x17, 0x9c, 0xf5, 0xc1, 0x22, 0x34, 0xcc, 0x9f, 0xce, 0xc3, 0x82, 0x21, 0x36, 0xa5, 0x06, 0xc7, 0x88, 0x19, 0x92, 0x2f, 0xb8, 0x99, 0x52, 0xe3, 0xa2, 0x33, 0x23, 0x9e, 0x7b, 0xe4, 0x7c, 0x45, 0x81, 0xa3, 0x35, 0xe3, 0xee, 0x5b, 0x41, 0xac, 0x2e, 0xe4, 0xa8, 0x6c, 0xf0, 0xb3, 0x80, 0xe2, 0xe9, 0x7e, 0x2c, 0xb8, 0xee, 0xda, 0x73, 0x87, 0xff, 0xb8, 0x10, 0xcc, 0x25, 0x90, 0x79, 0xb5, 0x2e, 0xbb, 0xaa, 0x98, 0x62, 0xb3, 0xbf, 0xaa, 0xcf, 0x9d, 0x38, 0xb2, 0xb8, 0x98, 0x14, 0xa0, 0xb0, 0xb2, 0x09, 0x84, 0xb5, 0xa3, 0x22, 0xb2, 0x6d, 0x71, 0x6e, 0xa4, 0x98, 0xb3, 0x7d, 0x5e, 0x90, 0xa6, 0x83, 0xb5, 0xe2, 0x4c, 0xc1, 0xa8, 0xc4, 0xb8, 0xa4, 0x3a, 0xb1, 0xaa, 0xf0, 0xbb, 0xc8, 0x29, 0xbd, 0xb1, 0x33, 0xc1, 0x84, 0x1e, 0x47, 0x33, 0x06, 0x8c, 0x4a, 0xe4, 0xdc, 0x44, 0xdf, 0x91, 0x92, 0xe4, 0xca, 0x5c, 0x03, 0x97, 0x13, 0xe3, 0x56, 0x6b, 0x1c, 0x9f, 0x50, 0xe5, 0x13, 0x7e, 0x7b, 0xa7, 0x22, 0xe4, 0x1b, 0x90, 0x29, 0xad, 
0x71, 0xdc, 0x84, 0x99, 0x6a, 0xad, 0x06, 0xcc, 0xeb, 0xa2, 0x8b, 0xab, 0x49, 0xbc, 0x8e, 0xa9, 0x48, 0xaa, 0x58, 0xaa, 0x95, 0xac, 0x48, 0xaa, 0x30, 0x97, 0x82, 0xae, 0x75, 0xa9, 0x65, 0x84, 0x97, 0xaf, 0xd3, 0xa9, 0x4e, 0x71, 0x97, 0xb0, 0xe6, 0xaa, 0xc4, 0x5f, 0xa8, 0xb2, 0x45, 0xad, 0x4b, 0x4f, 0x16, 0xb3, 0xda, 0xb0, 0x50, 0x3e, 0xeb, 0xb5, 0xdd, 0xb3, 0x32, 0x2d, 0xed, 0xbb, 0x07, 0xb9, 0xed, 0x23, 0x7e, 0x47, 0xc9, 0x7f, 0x3c, 0xe6, 0xbb, 0x5a, 0xa5, 0x83, 0xf9, 0xe6, 0x05, 0x6a, 0x14, 0x8a, 0x0c, 0xe3, 0xf0, 0x7f, 0xb1, 0x92, 0xf0, 0xe6, 0x38, 0x93, 0x14, 0x9c, 0xd3, 0xe5, 0x15, 0xa3, 0xba, 0xa3, 0xe2, 0xde, 0xd7, 0xab, 0x62, 0xa3, 0xe7, 0xcf, 0x46, 0xb3, 0x17, 0xa3, 0x2e, 0xbe, 0xb9, 0xb7, 0xf5, 0xa2, 0x5a, 0xab, 0xa5, 0xba, 0x13, 0xa1, 0x44, 0x97, 0xa9, 0xbb, 0x6f, 0xa0, 0x47, 0x84, 0xca, 0xbc, 0x16, 0x9f, 0xf0, 0x72, 0x44, 0xbc, 0xcd, 0xa0, 0x95, 0x60, 0x86, 0xbd, 0xe6, 0xa2, 0x65, 0x4f, 0x38, 0xbf, 0x27, 0xa4, 0xa9, 0x3d, 0xe0, 0xc0, 0x41, 0xa7, 0x6f, 0x2c, 0xed, 0xc4, 0x1c, 0xad, 0xcc, 0x22, 0x2a, 0x59, 0x5e, 0x74, 0x98, 0xe9, 0xfd, 0x6e, 0x94, 0x78, 0x22, 0xe8, 0xa0, 0x7f, 0xcc, 0x7e, 0x53, 0xe6, 0xa2, 0x8d, 0x92, 0x87, 0x52, 0xe6, 0xd4, 0xa3, 0x4b, 0x92, 0xa6, 0xe7, 0x25, 0xb7, 0xcb, 0x9c, 0x83, 0xe3, 0xff, 0xbc, 0x31, 0x9c, 0x6b, 0xd3, 0x58, 0xc1, 0xc3, 0x9c, 0x5d, 0xc2, 0x1f, 0xc4, 0x2f, 0x9a, 0x43, 0xac, 0xc7, 0xc5, 0xc0, 0x98, 0x16, 0x98, 0x5e, 0xc6, 0x95, 0x96, 0xde, 0x85, 0x75, 0xc6, 0xdf, 0x96, 0x1e, 0x73, 0x1b, 0xc7, 0x29, 0x96, 0x3c, 0x61, 0x14, 0xc7, 0xc2, 0x97, 0x4b, 0x4e, 0xd9, 0xc8, 0x8d, 0x98, 0x68, 0x3c, 0x03, 0xc9, 0x98, 0x9a, 0x5c, 0x2a, 0x24, 0xcb, 0x40, 0x9f, 0xd7, 0x1f, 0xfc, 0x71, 0x7a, 0x64, 0x01, 0xee, 0xca, 0x82, 0x77, 0x6b, 0xf7, 0xeb, 0xaf, 0x8f, 0xff, 0x74, 0xd4, 0xea, 0xed, 0xa0, 0x1f, 0x7d, 0x82, 0xe9, 0xb9, 0xb6, 0x0d, 0x89, 0x48, 0xeb, 0x35, 0xc9, 0x3c, 0x95, 0xfe, 0xe9, 0xd0, 0xcc, 0x77, 0x95, 0x45, 0xd8, 0x65, 0xce, 0x52, 0x93, 0xc9, 0xc4, 0xe0, 0xd0, 0x0f, 0x91, 0x59, 0xae, 0xaa, 0xd0, 0xe5, 0x8f, 0x0a, 0x99, 0xe7, 
0xd1, 0x12, 0x8d, 0x55, 0x86, 0x39, 0xd1, 0x31, 0x8c, 0x41, 0x73, 0x8e, 0xd1, 0x27, 0x8b, 0xa7, 0x60, 0x83, 0xd1, 0x97, 0x8c, 0x00, 0x4c, 0xc3, 0xd2, 0x1b, 0x8c, 0xa7, 0x38, 0x92, 0xd3, 0x1d, 0x8e, 0x40, 0x26, 0xa0, 0xd0, 0x08, 0x94, 0x24, 0x21, 0x56, 0x83, 0x42, 0x5b, 0xe6, 0xf2, 0xb6, 0x8e, 0xb3, 0x61, 0xc4, 0xed, 0x7a, 0x9e, 0x51, 0x67, 0x29, 0xe9, 0xc7, 0xb0, 0x08, 0x6f, 0x56, 0xea, 0xe8, 0xc0, 0x32, 0x7b, 0x7f, 0xe9, 0x30, 0xd0, 0x89, 0x85, 0xc3, 0xe7, 0xb2, 0xd7, 0x46, 0x89, 0xb0, 0xdc, 0x55, 0xdc, 0x07, 0x8a, 0xc2, 0xc9, 0xe1, 0xdd, 0x33, 0x87, 0x30, 0xb2, 0xe3, 0xdd, 0x18, 0x83, 0xd1, 0x9b, 0x72, 0xdb, 0x6d, 0x80, 0xe1, 0x86, 0x75, 0xda, 0x54, 0x7d, 0xbf, 0x71, 0x85, 0xd8, 0x63, 0x7c, 0xdd, 0x5b, 0x64, 0xd7, 0x50, 0x7c, 0xe2, 0x45, 0xd3, 0xd6, 0x5c, 0x7e, 0xf1, 0x32, 0x7c, 0xd3, 0xdb, 0x84, 0x06, 0x28, 0xa7, 0xd1, 0x1c, 0x8b, 0x6a, 0x24, 0x8e, 0x91, 0xff, 0x4b, 0x00, 0xf1, 0xd8, 0xa2, 0x88, 0x51, 0x25, 0xec, 0x97, 0xb0, 0x62, 0x5e, 0x8d, 0xe9, 0x18, 0xbd, 0x28, 0x63, 0x63, 0xe8, 0xc7, 0xcd, 0x72, 0x6d, 0xc3, 0xe9, 0x3b, 0xd7, 0xef, 0x74, 0x41, 0xe3, 0x19, 0xd8, 0xf9, 0x76, 0x7d, 0xd8, 0x5f, 0xdc, 0xa2, 0x75, 0xfe, 0xc4, 0x16, 0xdd, 0xda, 0x71, 0xf4, 0xab, 0x49, 0xdd, 0x7a, 0x6f, 0x08, 0x94, 0x11, 0xdb, 0xcc, 0x6c, 0xf4, 0x7e, 0xc1, 0xda, 0x5b, 0x69, 0xea, 0x69, 0x2c, 0xda, 0x55, 0x6a, 0x01, 0x53, 0x57, 0xdd, 0x51, 0x69, 0x59, 0x3c, 0x8c, 0xd8, 0x96, 0x6f, 0xfb, 0x2f, 0x75, 0xd3, 0xf2, 0x77, 0x8f, 0x29, 0xfc, 0xd1, 0xb5, 0x80, 0x4c, 0x26, 0xb7, 0x9d, 0x01, 0x37, 0x83, 0xf2, 0x09, 0xb1, 0x9b, 0x44, 0x4b, 0xec, 0x3f, 0xc0, 0x69, 0x4d, 0x52, 0xec, 0xd2, 0xcb, 0xe3, 0x57, 0x06, 0xeb, 0x06, 0xd5, 0x37, 0x60, 0x0f, 0xe5, 0xc5, 0xd8, 0x44, 0x60, 0x2f, 0xdd, 0xc2, 0xd9, 0xa5, 0x65, 0x5d, 0xd7, 0x23, 0xdd, 0x8d, 0x60, 0x15, 0xbd, 0x72, 0xde, 0xc3, 0x5c, 0x58, 0xa4, 0xed, 0xde, 0x1f, 0x58, 0xde, 0x8d, 0x99, 0xde, 0xc1, 0x56, 0x15, 0x79, 0x7d, 0xe0, 0x3b, 0x50, 0xab, 0x62, 0x75, 0xe1, 0x3a, 0x4d, 0xd1, 0x4a, 0xcb, 0xde, 0xf9, 0x52, 0x51, 0x36, 0xd0, 0xd8, 0xec, 0x5e, 
0xa3, 0x2e, 0xf4, 0xd3, 0xa6, 0x69, 0xa2, 0x2a, 0x91, 0xd1, 0xeb, 0x73, 0x13, 0x28, 0x38, 0xac, 0x5d, 0x34, 0x22, 0xe9, 0x8b, 0xb6, 0x79, 0x35, 0xe6, 0xeb, 0x24, 0xcc, 0x34, 0x40, 0xa1, 0xef, 0xb8, 0xd1, 0x22, 0x46, 0xe3, 0xe9, 0x34, 0xd8, 0xb3, 0x4a, 0x49, 0xdd, 0xdd, 0xdb, 0x14, 0x50, 0x67, 0xdb, 0x7c, 0xdc, 0x64, 0x55, 0x17, 0xd2, 0x17, 0xe0, 0x33, 0x4b, 0xd7, 0xb8, 0x7c, 0xe0, 0xa4, 0x46, 0x39, 0x9c, 0xd1, 0xdf, 0x51, 0x3d, 0xbc, 0x84, 0xc0, 0xdd, 0x3e, 0x36, 0x97, 0x6e, 0x6e, 0xdc, 0x4d, 0x35, 0xfe, 0x5a, 0x3e, 0xdb, 0x6f, 0x34, 0x07, 0x43, 0x70, 0xd6, 0x9b, 0x32, 0x76, 0x32, 0x68, 0xd6, 0x8b, 0x45, 0x7f, 0x2e, 0xf0, 0xd5, 0x26, 0x5b, 0x74, 0x2b, 0xcc, 0xd3, 0x80, 0x63, 0xb8, 0x29, 0xb4, 0xab, 0xe9, 0x33, 0x81, 0xe2, 0x8d, 0xb4, 0x2b, 0x34, 0x7f, 0xe3, 0xdd, 0xd1, 0xaa, 0x36, 0x0a, 0xec, 0xd0, 0xd6, 0xb9, 0x36, 0xd5, 0xdd, 0xaf, 0xd8, 0x1e, 0x37, 0x21, 0xdb, 0x9a, 0xda, 0x0b, 0x39, 0xff, 0xd9, 0x3a, 0xda, 0xaf, 0x35, 0xf3, 0xc5, 0x36, 0xdb, 0x3b, 0x34, 0xb8, 0xae, 0x10, 0xdb, 0x4f, 0x33, 0xb1, 0x95, 0x2e, 0xdb, 0x5f, 0x32, 0x92, 0x81, 0xcb, 0xd9, 0xa4, 0x31, 0x27, 0x6d, 0xf3, 0xd8, 0xf2, 0x30, 0x50, 0x5a, 0xcd, 0xd9, 0xe5, 0x2f, 0x1c, 0x45, 0x15, 0xd4, 0xa9, 0x2f, 0x13, 0x31, 0x49, 0xd2, 0x66, 0x2f, 0x59, 0x2e, 0xee, 0xd0, 0xf9, 0x2f, 0x8b, 0x2d, 0x76, 0xd3, 0x6d, 0x58, 0x90, 0x2a, 0x92, 0x22, 0x02, 0xd3, 0x39, 0xe0, 0x7e, 0x1e, 0xe1, 0xda, 0xfa, 0xe2, 0xe0, 0x1f, 0x2f, 0xe1, 0xb3, 0xe2, 0x48, 0x20, 0x43, 0xe2, 0xfb, 0xe0, 0x02, 0x24, 0xdc, 0xdf, 0x0e, 0xd4, 0x66, 0x26, 0xb7, 0xdd, 0xa9, 0xca, 0xc3, 0x28, 0x47, 0xda, 0x8d, 0xc2, 0x0d, 0x2d, 0x1a, 0xd3, 0x58, 0xae, 0xa5, 0x2e, 0x95, 0xce, 0x75, 0x9f, 0x89, 0x2f, 0x6c, 0xcb, 0xc8, 0x93, 0x34, 0x2e, 0xaa, 0xcb, 0x60, 0x84, 0xa0, 0x2e, 0x8f, 0xcc, 0x22, 0x74, 0x99, 0x2c, 0x99, 0xce, 0x5e, 0x5e, 0xd5, 0x2b, 0x0c, 0xd0, 0x91, 0x30, 0x83, 0x2b, 0x3b, 0xd0, 0x56, 0x2e, 0x1a, 0x2b, 0x69, 0xd0, 0x22, 0x2b, 0xe3, 0x2b, 0x9a, 0xcf, 0xef, 0x29, 0xd6, 0x23, 0x1d, 0xca, 0xf9, 0xe1, 0x0b, 0x24, 0x18, 0xd3, 0xbb, 0xe0, 0xde, 
0x20, 0xd6, 0xdd, 0x30, 0xe3, 0x8d, 0x21, 0x43, 0xe3, 0x0c, 0xe3, 0x0f, 0x24, 0x49, 0xe2, 0xd2, 0xdc, 0x79, 0x27, 0xe9, 0xdf, 0xa8, 0xd2, 0x6a, 0x28, 0x8f, 0xde, 0x0c, 0xc8, 0x67, 0x2d, 0x70, 0xd6, 0xee, 0xb6, 0x62, 0x2f, 0x0d, 0xd0, 0x69, 0xa4, 0xcf, 0x2f, 0x63, 0xcc, 0x53, 0x95, 0x0c, 0x2e, 0xa6, 0xcb, 0x63, 0x82, 0x90, 0x2e, 0x5a, 0xcd, 0x70, 0x6c, 0xa2, 0x2f, 0x3c, 0xd0, 0x52, 0x53, 0x3b, 0x2c, 0xcb, 0xd1, 0x4f, 0x30, 0x94, 0x2c, 0xf8, 0xd0, 0xfd, 0x2d, 0x8b, 0x2d, 0x26, 0xd0, 0xb3, 0x2a, 0xcd, 0x44, 0xb6, 0xd4, 0x35, 0x23, 0xbb, 0x24, 0x79, 0xca, 0x41, 0xe2, 0x1e, 0x24, 0xfe, 0xcb, 0xaf, 0xe1, 0xea, 0x27, 0x31, 0xd4, 0x70, 0xe1, 0x6b, 0x24, 0x07, 0xdf, 0xb4, 0xe4, 0x9d, 0x25, 0x52, 0xe4, 0x51, 0xe3, 0x69, 0x28, 0xa9, 0xe3, 0x36, 0xd9, 0x1f, 0x2c, 0xdc, 0xe0, 0x69, 0xcd, 0xbe, 0x2f, 0x51, 0xdc, 0x35, 0xc0, 0x2a, 0x30, 0xb6, 0xd3, 0x54, 0xaa, 0x91, 0x30, 0x1f, 0xcc, 0x20, 0x95, 0xdb, 0x2e, 0xdf, 0xc8, 0xff, 0x7f, 0x93, 0x2f, 0x31, 0xcd, 0xff, 0x65, 0x2b, 0x30, 0x6b, 0xd4, 0x79, 0x42, 0x5f, 0x2f, 0xf7, 0xd2, 0xa1, 0x30, 0xbc, 0x40, 0x96, 0xd6, 0x8f, 0x2a, 0x63, 0x52, 0x60, 0xd6, 0xd6, 0x27, 0x81, 0x77, 0x69, 0xd5, 0x0a, 0x23, 0x1f, 0x24, 0xce, 0xc1, 0xb6, 0xe6, 0x15, 0x26, 0x01, 0xc8, 0xee, 0xe4, 0xee, 0x27, 0xef, 0xcc, 0xbb, 0xe3, 0x30, 0x2c, 0x42, 0xd5, 0x85, 0xe2, 0x50, 0x29, 0xf1, 0xe2, 0xea, 0xe6, 0x54, 0x30, 0xab, 0xe3, 0x81, 0xe0, 0xbe, 0x33, 0x7e, 0xe3, 0x2e, 0xd5, 0xc7, 0x42, 0x9e, 0xe4, 0xb0, 0xcc, 0xa1, 0x4d, 0x60, 0xe0, 0x78, 0xba, 0x3b, 0x4f, 0x88, 0xdd, 0xd1, 0xa5, 0x83, 0x4e, 0x13, 0xde, 0x89, 0x8c, 0x70, 0x4d, 0xb0, 0xdd, 0xfe, 0x6c, 0x5b, 0x4e, 0x52, 0xdc, 0x81, 0x4c, 0x36, 0x52, 0xc9, 0xdc, 0x4b, 0x31, 0x8c, 0x5f, 0x25, 0xdc, 0x96, 0x27, 0xf9, 0x7a, 0x4b, 0xd8, 0x06, 0x24, 0xcf, 0x7f, 0xb0, 0xd5, 0x0b, 0x21, 0x8c, 0x27, 0x5f, 0xb8, 0x75, 0xe7, 0x4f, 0x28, 0xdf, 0xc0, 0xaf, 0xe7, 0xae, 0x2a, 0x56, 0xc7, 0x9c, 0xe6, 0xe8, 0x2d, 0x2d, 0xce, 0x69, 0xe5, 0x37, 0x36, 0x84, 0xd7, 0x83, 0xe3, 0xfa, 0x43, 0x1f, 0xe3, 0x4c, 0xe5, 0x75, 0x56, 0xc2, 0xe2, 
0x07, 0xdb, 0x3f, 0x63, 0x1e, 0xe2, 0xea, 0xd0, 0x89, 0x6f, 0xc1, 0xe3, 0x0b, 0xc4, 0xba, 0x74, 0x43, 0xe1, 0x1c, 0xb0, 0x39, 0x76, 0x7c, 0xdf, 0xed, 0x9a, 0x30, 0x79, 0xd7, 0xdf, 0x9f, 0x80, 0x95, 0x7c, 0x9e, 0xdf, 0x96, 0x66, 0x35, 0x7d, 0x99, 0xe4, 0x4b, 0x3d, 0x2a, 0x81, 0xc4, 0xde, 0x8d, 0x2d, 0xca, 0x89, 0xa1, 0xd7, 0xef, 0x24, 0x1c, 0x98, 0xff, 0xd4, 0xb2, 0x20, 0xa0, 0x2b, 0xc8, 0xb2, 0x49, 0xe5, 0x69, 0x2d, 0xd7, 0xb6, 0x97, 0xe6, 0xed, 0x30, 0x0c, 0xbe, 0xc6, 0xea, 0x29, 0x36, 0x49, 0xc7, 0x18, 0xe9, 0xf8, 0x53, 0xfb, 0xd1, 0xcf, 0xe7, 0x14, 0x5f, 0x40, 0xdb, 0x1b, 0xe5, 0x9b, 0x6e, 0xe4, 0xdd, 0x35, 0xdd, 0x74, 0x79, 0xc4, 0xdb, 0x72, 0xcf, 0x7d, 0x81, 0x54, 0xd6, 0xda, 0xbe, 0xde, 0x85, 0x66, 0xd4, 0xfe, 0xab, 0x03, 0x89, 0x5b, 0xd3, 0x91, 0x95, 0x80, 0x8c, 0x2d, 0xd2, 0xe6, 0x7e, 0x06, 0x8f, 0x8f, 0xd2, 0xb4, 0x65, 0xfe, 0x94, 0x87, 0xd0, 0xc2, 0x4d, 0xa7, 0x99, 0x0d, 0xd0, 0xd0, 0x35, 0x5b, 0x9b, 0xe3, 0xd4, 0x75, 0x1f, 0xe2, 0xa1, 0x70, 0xd1, 0x57, 0x1a, 0xff, 0x2e, 0xf0, 0xa5, 0x2e, 0xe3, 0xea, 0x31, 0xa7, 0xaa, 0x3f, 0xe4, 0x77, 0x38, 0xdf, 0xb2, 0x69, 0xe7, 0x29, 0x55, 0x33, 0xbb, 0x6d, 0xe6, 0xdb, 0x61, 0x97, 0xc4, 0xd4, 0xe3, 0x59, 0x72, 0x93, 0xc9, 0x44, 0xdd, 0x6c, 0x82, 0x30, 0xcf, 0x75, 0xd7, 0xeb, 0x8a, 0xcc, 0xcd, 0x37, 0xc8, 0xdf, 0x91, 0x4e, 0xca, 0xca, 0xb8, 0x8c, 0x97, 0x7f, 0xc8, 0x47, 0xa6, 0x1c, 0x9b, 0xa7, 0xc7, 0x44, 0x91, 0xc2, 0x9f, 0x11, 0xc6, 0x7c, 0x7c, 0xa4, 0xa1, 0x75, 0xc6, 0x93, 0x67, 0x45, 0xa3, 0x1e, 0xc6, 0xb7, 0x52, 0x21, 0xa5, 0x93, 0xc9, 0x62, 0x3c, 0x7c, 0xa8, 0x71, 0xcc, 0xbb, 0x28, 0x8d, 0xab, 0x73, 0xcf, 0xbc, 0x17, 0x4e, 0x31, 0x3e, 0x9b, 0xc3, 0xe3, 0xf5, 0x36, 0x05, 0x9f, 0x37, 0xe4, 0xb4, 0x53, 0x53, 0xa6, 0xa6, 0xe5, 0x8c, 0x5f, 0xda, 0xae, 0x55, 0xe3, 0x94, 0x72, 0x42, 0xb4, 0xb2, 0xe0, 0x93, 0x82, 0xec, 0xbc, 0x53, 0xda, 0x48, 0x92, 0xa4, 0xc2, 0xf5, 0xd5, 0xd3, 0x9b, 0x08, 0xc0, 0xd5, 0xc5, 0xea, 0xa3, 0xed, 0xbf, 0x28, 0xb6, 0x34, 0xa7, 0xef, 0xbe, 0x31, 0xa3, 0x9b, 0xab, 0x48, 0xbd, 0x5e, 0x8f, 0x9e, 
0xad, 0xa4, 0xbd, 0x65, 0x7b, 0xd2, 0xae, 0xef, 0xbe, 0x3a, 0x68, 0x24, 0xb0, 0x18, 0xbf, 0xff, 0x55, 0x79, 0xb2, 0x00, 0xc2, 0x94, 0x42, 0xa3, 0xb4, 0x05, 0xc5, 0x72, 0x30, 0x61, 0xb6, 0x05, 0xc8, 0x93, 0x1f, 0xa5, 0x39, 0x90, 0x8e, 0xc7, 0xe5, 0x20, 0x53, 0x2a, 0x92, 0x51, 0xe2, 0x16, 0x5e, 0x3d, 0x99, 0xe4, 0xe3, 0xf0, 0x73, 0x85, 0xa2, 0x21, 0xe5, 0x1e, 0x84, 0xb5, 0xa9, 0xb0, 0xe2, 0xc1, 0x93, 0x92, 0xb0, 0x62, 0xdc, 0xe1, 0xa4, 0xad, 0xb7, 0x7b, 0xd6, 0x8a, 0xac, 0xd1, 0xb5, 0xc8, 0xc6, 0x5c, 0xb3, 0xc1, 0xb4, 0xd5, 0xb5, 0x10, 0xb6, 0xd2, 0xb4, 0xcf, 0xa2, 0xea, 0xb8, 0xd3, 0xb4, 0x70, 0x8f, 0x2c, 0xba, 0x15, 0xb4, 0x73, 0x7b, 0xb4, 0xba, 0xfb, 0xb5, 0x8b, 0x69, 0x34, 0xbc, 0x14, 0xb7, 0x94, 0x57, 0x88, 0xbd, 0x56, 0xba, 0x9a, 0x47, 0x4d, 0xbe, 0xae, 0xbd, 0xf5, 0x36, 0x53, 0xbf, 0xe6, 0xbf, 0x81, 0x24, 0x37, 0x52, 0xe1, 0x80, 0x07, 0xe5, 0xbc, 0x5f, 0x04, 0x87, 0x3b, 0xe6, 0x43, 0x71, 0x0c, 0x8d, 0x31, 0xe5, 0xab, 0x84, 0xea, 0x95, 0x76, 0xe5, 0xdc, 0x97, 0x31, 0x9f, 0xa6, 0xe4, 0x59, 0xa7, 0x5c, 0xa6, 0x70, 0xde, 0xf6, 0xb6, 0xda, 0xae, 0xa9, 0xda, 0x09, 0xbd, 0x06, 0xae, 0x25, 0xc8, 0x9e, 0xc1, 0x60, 0xad, 0x32, 0xb6, 0x0f, 0xc3, 0x2c, 0xab, 0x99, 0xa2, 0x9c, 0xc4, 0x59, 0xaa, 0xdb, 0x8f, 0x37, 0xc5, 0x02, 0xaa, 0xa7, 0x7b, 0xe0, 0xc5, 0x98, 0xaa, 0xce, 0x69, 0x7c, 0xc6, 0x50, 0xac, 0x00, 0x57, 0x6c, 0xc7, 0x28, 0xad, 0xdb, 0x45, 0x42, 0xc8, 0x1d, 0xaf, 0xc2, 0x33, 0x3f, 0xc9, 0x53, 0xb2, 0x13, 0x22, 0x55, 0x5f, 0x25, 0x74, 0xa0, 0xe7, 0xe9, 0x71, 0x59, 0x7a, 0x67, 0xe8, 0xc2, 0x87, 0x63, 0x82, 0xf2, 0xe7, 0x16, 0x93, 0x1a, 0x8b, 0x16, 0xe6, 0x7e, 0xa7, 0xca, 0x96, 0x6a, 0xe7, 0x75, 0xb8, 0xb2, 0x9f, 0x0e, 0xe3, 0x92, 0xc6, 0x27, 0xa6, 0xf5, 0xdd, 0xc2, 0xca, 0x31, 0xa5, 0xed, 0xcb, 0x6b, 0xcc, 0xb6, 0xa4, 0x54, 0xb7, 0x30, 0xce, 0x93, 0xa2, 0x96, 0xa3, 0x1e, 0xcf, 0x3e, 0xa1, 0x85, 0x8f, 0xd5, 0xcf, 0x66, 0xa0, 0x8e, 0x7c, 0xa7, 0xcf, 0xbe, 0xa0, 0x5e, 0x69, 0xf9, 0xcf, 0xf8, 0xa0, 0x86, 0x57, 0x19, 0xd0, 0xcc, 0xa1, 0xda, 0x44, 0x03, 0xd0, 0xe3, 0xa1, 
0xf5, 0x2f, 0xdc, 0xd1, 0xfc, 0xa3, 0x60, 0x1e, 0x1b, 0x75, 0x4a, 0x69, 0x28, 0xed, 0xfc, 0x88, 0x97, 0x71, 0x02, 0xed, 0x0f, 0x90, 0xa4, 0x76, 0xe8, 0xea, 0xbf, 0xa5, 0x01, 0x81, 0xea, 0xe9, 0x99, 0xb8, 0xee, 0x8d, 0x1e, 0xeb, 0x56, 0xca, 0xfb, 0x98, 0xe9, 0xea, 0xcd, 0xd3, 0x27, 0x9d, 0x0c, 0xe0, 0x33, 0xd8, 0x0d, 0x9e, 0x64, 0xcf, 0xf1, 0xd9, 0x93, 0x9b, 0xc3, 0xb9, 0x23, 0xd9, 0xa5, 0x99, 0x33, 0xa4, 0x04, 0xd9, 0x49, 0x97, 0x4c, 0x8f, 0xf3, 0xd8, 0xff, 0x95, 0x6f, 0x7c, 0xca, 0xd6, 0x51, 0x92, 0x61, 0x66, 0xf6, 0xd4, 0xe6, 0x91, 0x75, 0x51, 0x54, 0xd3, 0xb5, 0x91, 0x51, 0x3b, 0xb1, 0xd3, 0xc6, 0x94, 0x9f, 0x2e, 0xd0, 0xd3, 0x0a, 0x99, 0x52, 0x26, 0x1e, 0x89, 0x3b, 0x60, 0xf3, 0xf1, 0x16, 0x93, 0x7d, 0x63, 0xe4, 0xec, 0x5c, 0xa0, 0x8b, 0x6a, 0xcd, 0xea, 0x13, 0xb4, 0x28, 0x74, 0x4a, 0xeb, 0x06, 0xc4, 0x03, 0x7f, 0xff, 0xe9, 0xfc, 0xd1, 0xb4, 0x89, 0x33, 0xe5, 0x6a, 0xd7, 0xbc, 0x8c, 0xb9, 0xdb, 0x91, 0xdb, 0xc0, 0x8e, 0x9b, 0xcc, 0x03, 0xdd, 0x13, 0x8b, 0xc2, 0xb5, 0x06, 0xdd, 0x0d, 0x88, 0x4b, 0x9d, 0x69, 0xdb, 0x49, 0x85, 0x97, 0x88, 0x99, 0xda, 0x22, 0x82, 0x97, 0x73, 0xfb, 0xd7, 0xd4, 0x81, 0xa7, 0x5e, 0x1d, 0xd6, 0xae, 0x81, 0xd9, 0x48, 0xec, 0xd5, 0x4c, 0x82, 0x9f, 0x34, 0xb2, 0xd4, 0x16, 0x88, 0xb4, 0x2c, 0xae, 0xd3, 0x26, 0x8e, 0x8a, 0x27, 0x1a, 0x96, 0xa5, 0x4c, 0x41, 0xf0, 0xad, 0xa2, 0x59, 0x5a, 0x4b, 0xeb, 0xe7, 0xb3, 0xd2, 0x62, 0x56, 0xe8, 0xd9, 0xc0, 0x19, 0x68, 0x76, 0xe8, 0xee, 0xd0, 0x7a, 0x72, 0xc1, 0xe9, 0x69, 0xd7, 0x0e, 0x77, 0x3e, 0xdf, 0x00, 0xd8, 0xb6, 0x7a, 0xaa, 0xd8, 0xb5, 0xdc, 0x55, 0x7b, 0x31, 0xc6, 0x38, 0xdc, 0xde, 0x77, 0xf6, 0xad, 0xf0, 0xdd, 0x4e, 0x74, 0x4d, 0x95, 0xfb, 0xdb, 0xa4, 0x72, 0x14, 0x80, 0xf4, 0xda, 0x05, 0x6f, 0x5e, 0x6b, 0xef, 0xd9, 0xf3, 0x6f, 0x2b, 0x56, 0x06, 0xdc, 0x53, 0x6e, 0xde, 0x3f, 0x1e, 0xd8, 0x59, 0x75, 0x30, 0x31, 0xe5, 0xd6, 0x3e, 0x7c, 0x86, 0x2c, 0x05, 0xd3, 0x72, 0x84, 0x56, 0x28, 0x1f, 0xa8, 0x09, 0x45, 0x01, 0xed, 0xa1, 0xaf, 0x39, 0x4b, 0xc9, 0xeb, 0xd7, 0xc3, 0x12, 0x50, 0xcd, 0xec, 0x34, 
0xcc, 0x77, 0x5c, 0x00, 0xea, 0xa7, 0xd6, 0xce, 0x61, 0x17, 0xe3, 0xf8, 0xd8, 0x3b, 0x64, 0xe4, 0xdc, 0xfe, 0xd9, 0x32, 0x69, 0xb8, 0xd7, 0x7d, 0xdd, 0x4c, 0x65, 0x64, 0xc0, 0x30, 0xde, 0x9e, 0x60, 0xed, 0xa7, 0x21, 0xde, 0x95, 0x5e, 0x4c, 0x8f, 0xff, 0xde, 0x7e, 0x5b, 0x85, 0x7b, 0x88, 0xdf, 0x79, 0x58, 0xfe, 0x65, 0x83, 0xe0, 0x26, 0x58, 0x76, 0x50, 0xd5, 0xe0, 0x5f, 0x5a, 0xdc, 0x39, 0xc8, 0xda, 0x9d, 0x63, 0x17, 0x30, 0x98, 0xd5, 0x43, 0x6e, 0x9c, 0x2b, 0xd8, 0xd3, 0x60, 0x77, 0x6d, 0x29, 0x55, 0xae, 0x34, 0x35, 0xa9, 0xeb, 0x2a, 0xbb, 0x03, 0x37, 0xcd, 0xed, 0x13, 0xca, 0xee, 0x48, 0x80, 0xed, 0x0d, 0xd5, 0x41, 0x4c, 0x02, 0xe4, 0x71, 0xd9, 0xa8, 0x51, 0xfc, 0xde, 0x25, 0xd8, 0x08, 0x57, 0x48, 0xda, 0x5d, 0xdb, 0x38, 0x59, 0xc0, 0xd5, 0x46, 0xe0, 0x08, 0x52, 0x7e, 0xbb, 0xd8, 0xe1, 0x50, 0x4b, 0x46, 0x9f, 0x37, 0xe1, 0x3c, 0x47, 0x60, 0x88, 0xaa, 0xe2, 0x70, 0x3f, 0x0e, 0x74, 0x36, 0xdd, 0x85, 0x39, 0x05, 0x5b, 0xde, 0xdc, 0x23, 0x35, 0xe4, 0x44, 0xbb, 0xd8, 0x2a, 0x34, 0x18, 0x33, 0x9a, 0xd8, 0x5f, 0x4f, 0x4b, 0x30, 0x28, 0xd6, 0x80, 0x5d, 0x97, 0x2c, 0xc9, 0xd3, 0x1b, 0x6a, 0x3a, 0x29, 0xe5, 0xb0, 0x12, 0x34, 0xd4, 0xe4, 0x3e, 0xbf, 0xec, 0x35, 0xa5, 0xea, 0x4c, 0xd3, 0xce, 0x36, 0xc1, 0xee, 0x24, 0xd7, 0x09, 0x37, 0xf1, 0xdd, 0x74, 0xd8, 0x9e, 0x3a, 0xee, 0xdb, 0xb7, 0xdb, 0x06, 0x47, 0x77, 0xd9, 0xd6, 0xde, 0x21, 0x47, 0x5c, 0xd0, 0x6f, 0xdb, 0x94, 0x36, 0x1b, 0xae, 0x3c, 0xdb, 0xc0, 0x35, 0x1a, 0x95, 0x74, 0xdc, 0x34, 0x34, 0x2f, 0x82, 0x69, 0xda, 0x9f, 0x32, 0xa3, 0x6e, 0xb5, 0xd9, 0xe1, 0x31, 0xec, 0x5b, 0xd5, 0xda, 0x94, 0x30, 0xd1, 0x46, 0x4e, 0xd5, 0xc9, 0x30, 0x39, 0x32, 0x22, 0xd3, 0x45, 0x30, 0x3b, 0x2f, 0x95, 0xd1, 0xae, 0x30, 0x44, 0x2d, 0xfd, 0xd4, 0x9b, 0x5c, 0x25, 0x2b, 0x36, 0x22, 0xb1, 0xd4, 0xc0, 0xe1, 0x8a, 0x20, 0x76, 0xdd, 0x37, 0xe3, 0x71, 0x20, 0xdf, 0xe2, 0x8e, 0xe2, 0xea, 0x22, 0xc7, 0xe3, 0x58, 0xe0, 0x1d, 0x26, 0x82, 0xdf, 0xb0, 0xd4, 0xcf, 0x28, 0x35, 0xde, 0x49, 0xcb, 0x24, 0x29, 0x6d, 0xdb, 0x5e, 0xc2, 0xc2, 0x2d, 0xd5, 0xd3, 
0xb7, 0xae, 0xcb, 0x2e, 0xdf, 0xce, 0x92, 0x9f, 0x52, 0x2f, 0x90, 0xcb, 0xc2, 0x92, 0xc4, 0x2e, 0xaa, 0xcb, 0x63, 0x84, 0x06, 0x2e, 0x09, 0xcc, 0xe9, 0x71, 0xd1, 0x2d, 0x11, 0xce, 0x9b, 0x5f, 0x17, 0x2c, 0x1f, 0xd1, 0x14, 0x31, 0x01, 0x2c, 0x42, 0xd0, 0xdd, 0x2e, 0xd0, 0x2c, 0x64, 0xd0, 0xa8, 0x2c, 0xc9, 0x2c, 0x88, 0xd0, 0x76, 0x2a, 0xe4, 0x24, 0x03, 0xcc, 0x5f, 0xe2, 0x7f, 0x24, 0x9d, 0xd5, 0x5a, 0xe2, 0x1e, 0x22, 0xb2, 0xdf, 0x75, 0xe4, 0x38, 0x23, 0x3f, 0xe3, 0xfd, 0xe3, 0xc1, 0x26, 0xc1, 0xe3, 0x4d, 0xdc, 0xae, 0x29, 0x9d, 0xe0, 0x4e, 0xd2, 0xd9, 0x29, 0xf0, 0xde, 0xdf, 0xc9, 0x03, 0x2e, 0x63, 0xd7, 0x6f, 0xb6, 0xa2, 0x2f, 0x74, 0xd0, 0x98, 0xa4, 0xa5, 0x2f, 0x88, 0xcc, 0x4f, 0x94, 0x9e, 0x2e, 0x86, 0xca, 0xe8, 0x81, 0xcf, 0x2e, 0x9c, 0xcd, 0x9e, 0x6c, 0x50, 0x30, 0x21, 0xd0, 0xca, 0x53, 0x74, 0x2d, 0xed, 0xd1, 0xdc, 0x31, 0x32, 0x2e, 0x0a, 0xd1, 0x8e, 0x2e, 0x7e, 0x2e, 0x29, 0xd1, 0x46, 0x2c, 0x09, 0x4c, 0x88, 0xd6, 0xc8, 0x24, 0x62, 0x25, 0xbe, 0xca, 0xff, 0xe2, 0x96, 0x26, 0x67, 0xcd, 0x19, 0xe3, 0x43, 0x27, 0x6a, 0xd6, 0x32, 0xe2, 0xee, 0x26, 0x25, 0xe2, 0x09, 0xe5, 0x54, 0x27, 0xdc, 0xe4, 0x5c, 0xe3, 0x47, 0x2a, 0xa4, 0xe3, 0xaa, 0xd9, 0x4b, 0x2e, 0x7b, 0xe0, 0xbb, 0xcd, 0xbd, 0x30, 0x57, 0xdc, 0xd6, 0xc0, 0xb5, 0x31, 0xe3, 0xd2, 0xe8, 0xaa, 0x74, 0x2f, 0x23, 0xc9, 0x77, 0x94, 0x00, 0x2e, 0xd9, 0xc8, 0xe6, 0x7e, 0xd9, 0x31, 0x19, 0xcc, 0x57, 0x65, 0x66, 0x31, 0xa5, 0xd5, 0x1a, 0x42, 0x3c, 0x30, 0xf6, 0xd3, 0x23, 0x31, 0x88, 0x44, 0xd9, 0xd8, 0x00, 0x2b, 0xe1, 0x54, 0x7d, 0xd8, 0xd1, 0x29, 0xb0, 0x7b, 0xdc, 0xd7, 0xfb, 0x24, 0x7d, 0x25, 0xff, 0xc3, 0xae, 0xe6, 0xd3, 0x28, 0x41, 0xcb, 0xab, 0xe3, 0xbd, 0x29, 0xfa, 0xce, 0x19, 0xe4, 0x5a, 0x2b, 0xe4, 0xd7, 0x7d, 0xe4, 0x23, 0x2c, 0x4a, 0xe3, 0xe8, 0xe5, 0x44, 0x32, 0xcb, 0xe3, 0x88, 0xe0, 0x95, 0x39, 0x18, 0xe5, 0x33, 0xd8, 0xa5, 0x47, 0x0e, 0xe5, 0x65, 0xcd, 0x76, 0x51, 0x7c, 0xe1, 0xc1, 0xbb, 0xb1, 0x57, 0xe3, 0xde, 0xb9, 0xa8, 0x79, 0x57, 0x08, 0xdd, 0xe0, 0x8e, 0x35, 0x55, 0x3e, 0xde, 0x80, 0x70, 0x3e, 
0x53, 0xa4, 0xde, 0x32, 0x4e, 0x93, 0x57, 0xc1, 0xdd, 0xe2, 0x32, 0xc0, 0x67, 0xe1, 0xdd, 0x77, 0x2c, 0x48, 0x7f, 0x99, 0xdc, 0x90, 0x27, 0x46, 0x85, 0x82, 0xd7, 0x6c, 0x24, 0x90, 0x28, 0xf0, 0xbb, 0x25, 0xe8, 0x76, 0x2a, 0xcb, 0xc2, 0x69, 0xe7, 0x77, 0x2c, 0x11, 0xc9, 0x99, 0xe6, 0xdb, 0x2f, 0xf1, 0xcf, 0x98, 0xe6, 0x03, 0x3b, 0xe1, 0xda, 0x89, 0xe5, 0xbc, 0x4f, 0xb2, 0xe1, 0x52, 0xe2, 0x8a, 0x5c, 0x6f, 0xe1, 0x8e, 0xdb, 0x33, 0x68, 0x94, 0xe2, 0xbf, 0xd0, 0x84, 0x72, 0xeb, 0xe6, 0x14, 0xc6, 0x0c, 0x75, 0xb4, 0xe4, 0x51, 0xb2, 0x45, 0x78, 0x75, 0xe3, 0x78, 0x9a, 0x9e, 0x7d, 0xd0, 0xde, 0xf6, 0x81, 0x1d, 0x7c, 0x31, 0xe3, 0xf3, 0x62, 0x3a, 0x81, 0xea, 0xe1, 0x8e, 0x46, 0x26, 0x88, 0x14, 0xe0, 0x3d, 0x32, 0xed, 0x95, 0x45, 0xda, 0x81, 0x25, 0x45, 0x9d, 0xfd, 0xd8, 0xf4, 0x23, 0x30, 0x2d, 0x1b, 0xb4, 0xb8, 0xe6, 0x54, 0x2f, 0x97, 0xb7, 0x64, 0xe7, 0x66, 0x31, 0xd2, 0xc0, 0x78, 0xe9, 0xe5, 0x52, 0x14, 0xca, 0xba, 0xe5, 0x52, 0x58, 0xd7, 0xd3, 0x73, 0xe7, 0x8c, 0x64, 0x7e, 0xdb, 0x03, 0xe3, 0xce, 0x75, 0x40, 0xdb, 0x69, 0xdb, 0x1d, 0x80, 0x77, 0xde, 0xea, 0xd2, 0xc2, 0x8c, 0x08, 0xe3, 0x3a, 0xca, 0x20, 0x91, 0x6e, 0xdd, 0xf9, 0xb4, 0xab, 0x95, 0x45, 0xdc, 0xf7, 0x9f, 0x9c, 0x98, 0xce, 0xdb, 0x87, 0x88, 0x71, 0x9b, 0xbc, 0xd9, 0xe3, 0x6f, 0xf2, 0x9e, 0x59, 0xd8, 0x34, 0x57, 0x7a, 0xa3, 0x27, 0xd6, 0x66, 0x3e, 0xbf, 0xa4, 0xf3, 0xdb, 0xf8, 0x27, 0xda, 0xa7, 0x1f, 0xd6, 0x8a, 0x1f, 0x1b, 0x30, 0xc0, 0xa7, 0x1e, 0xe3, 0xc2, 0x32, 0xab, 0xac, 0x53, 0xe4, 0x10, 0x4e, 0x85, 0xb7, 0x22, 0xe6, 0xee, 0x5c, 0x7e, 0xbd, 0xdf, 0xe6, 0x2e, 0x64, 0xf8, 0xc5, 0xcb, 0xe2, 0x11, 0x77, 0x56, 0xca, 0x1a, 0xdb, 0xe4, 0x85, 0xef, 0xd1, 0x91, 0xd7, 0xd7, 0x95, 0x32, 0xd8, 0x7e, 0xd3, 0x16, 0x9d, 0xe8, 0xd3, 0xa0, 0xc3, 0x19, 0xa1, 0xb3, 0xd3, 0x6b, 0xb1, 0x29, 0xa4, 0xf7, 0xd2, 0xc0, 0x9d, 0x1f, 0xa8, 0x27, 0xd1, 0xe0, 0x87, 0x8f, 0xaa, 0x61, 0xd1, 0xd8, 0x71, 0x5b, 0xac, 0x81, 0xd1, 0x3c, 0x5b, 0x4a, 0xae, 0xcc, 0xd1, 0xda, 0x44, 0xc7, 0xb1, 0x57, 0xd5, 0xb1, 0x2f, 0x8c, 0xb3, 0x0c, 0xd5, 
0x3e, 0x25, 0x04, 0x34, 0x2e, 0x9b, 0x84, 0xe2, 0x14, 0x40, 0x57, 0xa0, 0x6b, 0xe3, 0x1b, 0x5a, 0x6b, 0xaa, 0x35, 0xe5, 0x98, 0x66, 0x0e, 0xb2, 0x5f, 0xe5, 0x44, 0x75, 0xa6, 0xb5, 0xc6, 0xde, 0x52, 0x86, 0xeb, 0xbe, 0x9b, 0xd8, 0x97, 0x96, 0x49, 0xc4, 0xae, 0xd5, 0xc4, 0xa5, 0xd0, 0xcb, 0x4c, 0xd0, 0x6c, 0xac, 0xc4, 0xc9, 0xf8, 0xc0, 0x37, 0xb1, 0xf8, 0xc9, 0x1a, 0xae, 0xaa, 0xb5, 0x21, 0xc8, 0x5c, 0x9b, 0x1a, 0xb7, 0x2a, 0xc8, 0x4e, 0x87, 0x43, 0xb8, 0x82, 0xc9, 0x16, 0x72, 0xc3, 0xb9, 0x5a, 0xca, 0xf9, 0x5e, 0x8d, 0xbb, 0x12, 0xcc, 0x92, 0x4a, 0x96, 0xbc, 0xad, 0xcf, 0x51, 0x38, 0x1e, 0xbe, 0x81, 0xd2, 0x2a, 0x26, 0xb9, 0x41, 0x97, 0x90, 0xce, 0xe4, 0x92, 0x5a, 0xf6, 0x95, 0x93, 0xe3, 0x0a, 0x62, 0x9b, 0x9c, 0xc6, 0xe4, 0x8e, 0x77, 0xb5, 0xa5, 0x95, 0xe6, 0x13, 0x87, 0xc2, 0xaa, 0xc2, 0xdf, 0xed, 0x98, 0x67, 0xb2, 0x69, 0xda, 0x09, 0xa7, 0x79, 0xb9, 0xa9, 0xd5, 0xd1, 0xb6, 0xa9, 0xc0, 0x88, 0xd0, 0x34, 0xbe, 0x5c, 0xbf, 0x7f, 0xbf, 0xb0, 0xc0, 0xe4, 0xc0, 0x0c, 0xad, 0x47, 0xc2, 0x74, 0xbf, 0xd4, 0x99, 0xd9, 0xc3, 0x71, 0xc0, 0x0a, 0x86, 0x59, 0xc4, 0x60, 0xc1, 0x66, 0x73, 0x63, 0xc5, 0x59, 0xc3, 0x52, 0x60, 0xc9, 0xc6, 0x49, 0xc5, 0xe5, 0x4f, 0xfa, 0xc7, 0x15, 0xc7, 0xb5, 0x3d, 0xb6, 0xc8, 0x3f, 0xc8, 0xfc, 0x2b, 0x11, 0x5a, 0x44, 0x83, 0x9d, 0xe5, 0xfe, 0x64, 0xeb, 0x88, 0x97, 0xe3, 0x69, 0x77, 0x14, 0x8f, 0xf0, 0xe5, 0x74, 0x8a, 0x7e, 0x99, 0x44, 0xe5, 0xa5, 0x9a, 0x9c, 0xa1, 0x99, 0xe2, 0x90, 0xaa, 0x89, 0xa8, 0xd8, 0xdc, 0x8f, 0xb8, 0xd5, 0xb0, 0x55, 0xd8, 0x4e, 0xc4, 0xb4, 0xb7, 0x3d, 0xd0, 0xec, 0xc9, 0x83, 0xb7, 0x33, 0xbf, 0xa5, 0xcb, 0xc3, 0xb6, 0x70, 0xac, 0xc0, 0xcd, 0x54, 0xb6, 0x07, 0x99, 0x89, 0xce, 0x1b, 0xb6, 0x40, 0x86, 0x7a, 0xce, 0xc8, 0xb6, 0x9b, 0x73, 0x66, 0xcf, 0x74, 0xb7, 0xc3, 0x60, 0xc6, 0xd0, 0x1d, 0xb9, 0x51, 0x4e, 0x52, 0xd0, 0xce, 0xba, 0xcc, 0x3b, 0xe1, 0xd2, 0x05, 0xbd, 0x15, 0x2a, 0x88, 0x6c, 0x08, 0x78, 0xa3, 0xe9, 0x1f, 0x75, 0xd2, 0x7d, 0xfc, 0xe8, 0xca, 0x89, 0x28, 0x86, 0x0b, 0xe6, 0xb9, 0x98, 0x5e, 0x8e, 0xb8, 0xe6, 0x5a, 
0xae, 0x99, 0x9a, 0x74, 0xe6, 0xa2, 0xba, 0x37, 0xa1, 0x18, 0xe1, 0xee, 0xc8, 0xaa, 0xa9, 0x0b, 0xdb, 0xab, 0xd3, 0x5a, 0xaf, 0xd7, 0xd4, 0xed, 0xd4, 0xe2, 0xad, 0xf6, 0xc0, 0xa0, 0xd7, 0xa1, 0xac, 0xfa, 0xad, 0xb1, 0xd8, 0x28, 0xab, 0xd6, 0x9a, 0x01, 0xd7, 0xea, 0xaa, 0xc8, 0x86, 0x20, 0xd8, 0x43, 0xaa, 0x2a, 0x72, 0xfe, 0xd6, 0x2e, 0xa8, 0x61, 0x5d, 0xeb, 0xd4, 0xe3, 0xa8, 0x32, 0x48, 0xfb, 0xd3, 0x4a, 0xa7, 0xde, 0x34, 0xda, 0xd3, 0x47, 0xad, 0x56, 0x2b, 0xfd, 0x7d, 0x27, 0x6c, 0x75, 0xec, 0x2b, 0x89, 0xf7, 0x75, 0x1e, 0xec, 0x66, 0x94, 0xf7, 0x7a, 0x73, 0xea, 0x75, 0xa9, 0x4b, 0x85, 0xd0, 0xe9, 0xa6, 0xbc, 0xef, 0x91, 0x01, 0xeb, 0x33, 0xcb, 0xcc, 0x9b, 0x01, 0xe8, 0x7a, 0xd4, 0xd1, 0x9f, 0x43, 0xdc, 0xf6, 0xda, 0xa7, 0xa4, 0x06, 0xd4, 0xb7, 0xde, 0xb5, 0xa3, 0x14, 0xc0, 0x52, 0xdd, 0xdd, 0x9f, 0xd9, 0xa9, 0xf8, 0xdb, 0x62, 0x9c, 0x63, 0x93, 0xdc, 0xd8, 0xec, 0x98, 0xe5, 0x7e, 0x9b, 0xd6, 0x2a, 0x96, 0x27, 0x69, 0x27, 0xd4, 0x9b, 0x95, 0x53, 0x53, 0xd7, 0xd3, 0x80, 0x94, 0xdd, 0x3e, 0x1f, 0xd3, 0xaa, 0x98, 0x94, 0x31, 0xc7, 0xd3, 0x50, 0x9d, 0x68, 0x2b, 0x7a, 0x8a, 0xd8, 0x62, 0x1e, 0xf0, 0x9a, 0x98, 0x6a, 0x6a, 0x23, 0xeb, 0x04, 0xa4, 0xbe, 0x6e, 0x9c, 0xea, 0x44, 0xb6, 0x2d, 0x7a, 0x25, 0xe8, 0xff, 0xc7, 0x6c, 0x84, 0x3f, 0xea, 0xa5, 0xd3, 0x8f, 0x8b, 0xba, 0xe2, 0x84, 0xd7, 0xd8, 0x8f, 0xd0, 0xdb, 0x24, 0xdb, 0x56, 0x92, 0xfc, 0xcd, 0x81, 0xdd, 0x3b, 0x90, 0x0d, 0xb7, 0x69, 0xdc, 0xc7, 0x8c, 0xfd, 0x9f, 0xfd, 0xda, 0xe2, 0x89, 0xee, 0x8a, 0x5e, 0xd9, 0x79, 0x87, 0x2e, 0x75, 0xce, 0xd7, 0x7b, 0x85, 0x1d, 0x5f, 0xfc, 0xd5, 0xf9, 0x84, 0xe6, 0x4a, 0xe2, 0xd4, 0xcd, 0x86, 0x14, 0x35, 0xb9, 0xd3, 0x9e, 0x8c, 0x92, 0x2f, 0x3b, 0xd3, 0xdd, 0x91, 0x2c, 0x2b, 0x82, 0x9a, 0xe5, 0x52, 0xb2, 0xee, 0xc8, 0xa6, 0x74, 0x61, 0xf6, 0xe9, 0x4f, 0xb6, 0x97, 0x63, 0x8d, 0xe8, 0xc4, 0xc2, 0x6a, 0x6d, 0xd1, 0xe9, 0x4e, 0xd3, 0x80, 0x78, 0x18, 0xe9, 0xaa, 0xd7, 0x3c, 0x7a, 0xf2, 0xdd, 0xbc, 0xd8, 0x13, 0x7f, 0x06, 0xd8, 0x5e, 0xdb, 0xe2, 0x80, 0x03, 0xc8, 0xa0, 0xdc, 0xe5, 0x7d, 
0x16, 0xb0, 0xe1, 0xdd, 0x70, 0x78, 0x9f, 0x99, 0x35, 0xdb, 0x86, 0x75, 0x96, 0x82, 0x5c, 0xda, 0xd1, 0x73, 0x88, 0x6d, 0xcd, 0xd9, 0xbd, 0x74, 0x01, 0x59, 0x2e, 0xd8, 0xb6, 0x75, 0x1b, 0x43, 0xe6, 0xd7, 0x50, 0x79, 0x52, 0x33, 0xe3, 0xd5, 0x74, 0x80, 0x87, 0x2e, 0x68, 0xd4, 0x13, 0x88, 0x4b, 0x2a, 0x35, 0xa7, 0x9e, 0x4b, 0x1c, 0xed, 0x59, 0xaf, 0x4a, 0x4e, 0xac, 0xec, 0xbb, 0xc0, 0x56, 0x5c, 0xf8, 0xe9, 0x54, 0xce, 0xf4, 0x62, 0x57, 0xe9, 0x91, 0xd8, 0x43, 0x63, 0x90, 0xe2, 0x6c, 0xd8, 0x33, 0x68, 0xd2, 0xdc, 0x43, 0xd8, 0xe6, 0x6e, 0x72, 0xd7, 0x9b, 0xdc, 0xf2, 0x6b, 0x79, 0xc3, 0x45, 0xde, 0x11, 0x64, 0x7c, 0xa6, 0x5c, 0xde, 0x80, 0x61, 0xe2, 0x91, 0x44, 0xde, 0x43, 0x60, 0x6e, 0x7d, 0x22, 0xdb, 0x17, 0x60, 0x8d, 0x67, 0x6d, 0xde, 0xbb, 0x5e, 0x5c, 0x52, 0x8e, 0xdf, 0x09, 0x63, 0x9e, 0x3c, 0xbd, 0xdd, 0x2e, 0x69, 0x06, 0x32, 0x60, 0xd6, 0xef, 0x73, 0x9c, 0x2d, 0x31, 0xd4, 0xe3, 0x7b, 0xd1, 0x2a, 0x85, 0xb0, 0x0a, 0x37, 0x31, 0xec, 0xc9, 0xb9, 0xb6, 0x3f, 0xc3, 0xeb, 0x26, 0xcd, 0xdd, 0x4d, 0x2d, 0xec, 0x60, 0xd7, 0x4b, 0x53, 0xf5, 0xe1, 0xf2, 0xd6, 0x5e, 0x57, 0x10, 0xdc, 0x9f, 0xd8, 0xfd, 0x5d, 0x83, 0xdb, 0x50, 0xda, 0x44, 0x5d, 0x48, 0xd7, 0xc2, 0xdd, 0xad, 0x58, 0xd7, 0xbd, 0x7c, 0xde, 0xb7, 0x52, 0xed, 0xa2, 0x06, 0xe0, 0xce, 0x4c, 0x1f, 0x8b, 0x2b, 0xe2, 0x26, 0x48, 0x4a, 0x77, 0x56, 0xdf, 0xb8, 0x3e, 0xbb, 0x5e, 0x96, 0xdd, 0x94, 0x3d, 0xd1, 0x4a, 0x29, 0xdb, 0x00, 0x38, 0x41, 0x35, 0x5d, 0xda, 0x81, 0x59, 0xf5, 0x31, 0x27, 0xd7, 0x6c, 0x61, 0xf0, 0x2d, 0x9c, 0xd4, 0x40, 0x6e, 0xdc, 0x2a, 0xaf, 0xba, 0x26, 0x35, 0x7b, 0xe9, 0x58, 0xd1, 0x0d, 0x3a, 0x4f, 0xf0, 0x53, 0xd8, 0x69, 0x38, 0x94, 0xe6, 0x34, 0xd7, 0x99, 0x3b, 0x91, 0xdd, 0x74, 0xd9, 0xce, 0x48, 0x77, 0xdc, 0x0d, 0xdb, 0x2c, 0x4b, 0x3d, 0xda, 0x19, 0xde, 0xe9, 0x4b, 0xb7, 0xd1, 0x6f, 0xde, 0x2e, 0x42, 0xe2, 0xb5, 0x11, 0xdc, 0x21, 0x36, 0x4c, 0x95, 0x9f, 0xdc, 0xaa, 0x35, 0x85, 0x82, 0xbb, 0xdb, 0x81, 0x33, 0xfc, 0x6f, 0x5e, 0xda, 0xb8, 0x33, 0x62, 0x5c, 0xc6, 0xdb, 0x2e, 0x32, 0x55, 0x47, 0x6d, 
0xd6, 0xe5, 0x31, 0x62, 0x32, 0xfb, 0xd4, 0x23, 0x31, 0x20, 0x30, 0x3b, 0xd6, 0x4b, 0x4f, 0x75, 0x2e, 0x17, 0xd5, 0x82, 0x5d, 0x2d, 0x2b, 0xe9, 0x24, 0x01, 0xd5, 0x53, 0xe1, 0xed, 0x21, 0xf1, 0xdf, 0x60, 0xe3, 0xff, 0x22, 0x65, 0xe3, 0x4d, 0xe3, 0x74, 0x25, 0x00, 0xe3, 0x6d, 0xdf, 0xfd, 0x27, 0xe8, 0xe0, 0x37, 0xd5, 0x24, 0x29, 0x73, 0xde, 0xcc, 0xcb, 0x6f, 0x2a, 0x64, 0xdc, 0x0c, 0xc3, 0x5c, 0x2e, 0x6d, 0xd4, 0x01, 0xae, 0xe2, 0x2f, 0x1a, 0xce, 0xa2, 0x9f, 0x13, 0x2f, 0x28, 0xcc, 0x20, 0x91, 0x08, 0x2e, 0xaa, 0xcb, 0x65, 0x83, 0x6a, 0x2e, 0x26, 0xcc, 0xfb, 0x71, 0xde, 0x2d, 0x74, 0xce, 0xcb, 0x5f, 0x4b, 0x2d, 0x00, 0xd1, 0x81, 0x31, 0x6a, 0x2d, 0x1b, 0xd1, 0x49, 0x2f, 0x68, 0x2d, 0x35, 0xd1, 0x16, 0x2d, 0x87, 0x2d, 0x52, 0xd0, 0xe6, 0x2b, 0xc6, 0x25, 0x44, 0xce, 0x53, 0xe2, 0x2d, 0x26, 0x0e, 0xd5, 0xfb, 0xe2, 0x87, 0x24, 0x57, 0xe1, 0xaa, 0xe4, 0xd0, 0x25, 0x32, 0xe4, 0x41, 0xe3, 0xd4, 0x28, 0xc6, 0xe3, 0xae, 0xdc, 0xd6, 0x2b, 0x01, 0xe0, 0xd0, 0xd3, 0x2c, 0x2b, 0x0e, 0xdf, 0x86, 0xc9, 0x7f, 0x2f, 0x22, 0xd7, 0xcd, 0xb6, 0xc9, 0x2f, 0xf3, 0xd0, 0xd9, 0xa4, 0x54, 0x2f, 0x9b, 0xcc, 0x3e, 0x94, 0x28, 0x2e, 0xb1, 0xcb, 0x45, 0x81, 0x3b, 0x2d, 0x91, 0xcd, 0xa2, 0x6b, 0xb1, 0x30, 0xd5, 0xd1, 0x2b, 0x53, 0x93, 0x2e, 0xd0, 0xd2, 0x48, 0x31, 0xac, 0x2e, 0xe3, 0xd1, 0xfe, 0x2f, 0x3e, 0x2e, 0xf6, 0xd1, 0xba, 0x2d, 0x02, 0x51, 0x0e, 0xd9, 0x16, 0x24, 0xf7, 0x26, 0xe1, 0xcb, 0xa8, 0xe2, 0xfe, 0x27, 0xfd, 0xce, 0xfc, 0xe3, 0x60, 0x28, 0xfd, 0xd6, 0xe3, 0xe3, 0x58, 0x28, 0x06, 0xe3, 0xc4, 0xe5, 0x29, 0x29, 0xd8, 0xe4, 0x60, 0xe3, 0x28, 0x2c, 0x2b, 0xe3, 0xff, 0xd9, 0x65, 0x2f, 0x97, 0xe1, 0x1d, 0xcd, 0xef, 0x31, 0x19, 0xdd, 0x52, 0xc1, 0x26, 0x32, 0x8e, 0xd3, 0x96, 0xab, 0x52, 0x31, 0xba, 0xcf, 0x73, 0x96, 0x1a, 0x2e, 0xd2, 0xc8, 0xd6, 0x7e, 0x22, 0x31, 0x69, 0xcc, 0x62, 0x64, 0xfb, 0x32, 0x8d, 0xd5, 0x92, 0x41, 0xf4, 0x31, 0xe7, 0xd2, 0x26, 0x32, 0x7d, 0x49, 0x28, 0xd9, 0x84, 0x2c, 0xc3, 0x57, 0x59, 0xda, 0xa3, 0x28, 0x3b, 0x80, 0x07, 0xda, 0xde, 0x25, 0xea, 0x27, 0x8a, 0xc5, 
0x8d, 0xe6, 0xa8, 0x29, 0x7f, 0xcc, 0x66, 0xe4, 0x1f, 0x2b, 0xa6, 0xcf, 0xfe, 0xe4, 0x6f, 0x2b, 0x87, 0xdb, 0x50, 0xe6, 0x73, 0x2e, 0x04, 0xe3, 0xf3, 0xe4, 0xd2, 0x34, 0x53, 0xe3, 0x8b, 0xe0, 0x72, 0x3d, 0x1c, 0xe5, 0xa5, 0xd8, 0xdb, 0x53, 0x3b, 0xe4, 0x35, 0xcd, 0x4e, 0x5b, 0x40, 0xe2, 0xd3, 0xbd, 0xf7, 0x5d, 0x3e, 0xe0, 0x39, 0xaa, 0x26, 0x5f, 0x78, 0xdf, 0x82, 0x90, 0xd0, 0x5a, 0x70, 0xe0, 0x09, 0x72, 0xd0, 0x5c, 0xe3, 0xdf, 0x57, 0x53, 0x9c, 0x5b, 0xc7, 0xdc, 0xcd, 0x38, 0x0b, 0x72, 0xac, 0xe0, 0x3f, 0x2e, 0x69, 0x83, 0xba, 0xde, 0x8b, 0x2a, 0x8f, 0x8a, 0x73, 0xda, 0xcd, 0x26, 0xf4, 0x2a, 0x6a, 0xbd, 0xa9, 0xe9, 0x5e, 0x2b, 0x71, 0xc4, 0x03, 0xe8, 0x70, 0x2d, 0x79, 0xcb, 0x8a, 0xe6, 0xbf, 0x34, 0x75, 0xd0, 0xae, 0xe4, 0x88, 0x4b, 0xb3, 0xdd, 0x93, 0xe6, 0x8f, 0x58, 0x35, 0xdf, 0xc2, 0xe0, 0x5b, 0x63, 0x09, 0xdf, 0x6f, 0xd9, 0xb2, 0x71, 0x1c, 0xe0, 0xc2, 0xcf, 0x7f, 0x77, 0x6f, 0xe3, 0xd9, 0xc5, 0x0f, 0x79, 0xf3, 0xe3, 0xfc, 0xb2, 0x7f, 0x7f, 0xa6, 0xdf, 0x81, 0x9b, 0xac, 0x82, 0x9a, 0xde, 0xb9, 0x82, 0xc8, 0x82, 0x67, 0xe3, 0x21, 0x65, 0x05, 0x8a, 0x9e, 0xe0, 0x26, 0x4d, 0x94, 0x8e, 0x16, 0xdf, 0x14, 0x38, 0xc0, 0x9a, 0x8f, 0xdb, 0xc3, 0x2a, 0xcd, 0xa3, 0x06, 0xdd, 0x22, 0x25, 0xd3, 0x2e, 0x9c, 0xb6, 0x07, 0xe6, 0xd7, 0x30, 0x37, 0xb9, 0x2d, 0xe8, 0x63, 0x33, 0x22, 0xc2, 0x2e, 0xe9, 0x9b, 0x59, 0x93, 0xcc, 0x5c, 0xe5, 0xa2, 0x5c, 0xce, 0xd3, 0x84, 0xe6, 0x2b, 0x6c, 0x54, 0xda, 0x51, 0xe1, 0x73, 0x7a, 0x58, 0xdb, 0x82, 0xdb, 0x17, 0x84, 0xfb, 0xde, 0xcc, 0xd3, 0x3a, 0x8f, 0x43, 0xe2, 0x94, 0xc8, 0xe3, 0x94, 0x6e, 0xe0, 0x08, 0xb6, 0x6e, 0x98, 0x67, 0xde, 0x49, 0xa1, 0xcf, 0x9a, 0x8a, 0xdd, 0x9f, 0x89, 0xf4, 0x9e, 0x5c, 0xdc, 0x6f, 0x72, 0x47, 0xa2, 0x01, 0xdb, 0xb3, 0x5a, 0x40, 0xa5, 0xbf, 0xd9, 0x21, 0x41, 0xc5, 0xa9, 0xe9, 0xda, 0xeb, 0x2f, 0x77, 0xab, 0xef, 0xd8, 0xfb, 0x25, 0x21, 0x31, 0x94, 0xa9, 0x22, 0xe4, 0x56, 0x35, 0xf5, 0xaf, 0xa7, 0xe6, 0x0e, 0x53, 0x58, 0xb7, 0xdf, 0xe6, 0xed, 0x5e, 0x71, 0xbf, 0x59, 0xe5, 0x27, 0x6b, 0xf5, 0xc6, 0x23, 0xdf, 0xb4, 
0x7d, 0xaa, 0xca, 0x78, 0xda, 0x20, 0x8b, 0x0d, 0xd2, 0xc1, 0xd6, 0x78, 0x99, 0x04, 0xd6, 0x42, 0xd3, 0x4b, 0xa7, 0xf3, 0xde, 0x1f, 0xcd, 0xb6, 0xac, 0x92, 0xdd, 0x33, 0xbb, 0x25, 0xb0, 0x0e, 0xdc, 0x44, 0xa7, 0xf7, 0xb2, 0x54, 0xdb, 0xd4, 0x93, 0x0d, 0xb4, 0x63, 0xdc, 0x16, 0x7d, 0xc7, 0xb4, 0x68, 0xd7, 0xf7, 0x62, 0xd8, 0xb5, 0x27, 0xd5, 0xb6, 0x4a, 0xa1, 0xb6, 0x67, 0xd8, 0xf5, 0x35, 0x1b, 0xb8, 0x30, 0xd7, 0xed, 0x2b, 0xca, 0x33, 0xe2, 0x9e, 0xd4, 0xe4, 0x99, 0x4e, 0x06, 0xa4, 0x8b, 0xe4, 0x57, 0x5c, 0x6b, 0xac, 0xfd, 0xe4, 0xd2, 0x6d, 0x0a, 0xb3, 0xc3, 0xe2, 0xcb, 0x79, 0x5d, 0xb7, 0xad, 0xdd, 0x08, 0x89, 0xd5, 0xc0, 0x8f, 0xd7, 0x57, 0x9a, 0x3b, 0xc5, 0xea, 0xd4, 0x8c, 0xa9, 0xf1, 0xcd, 0x26, 0xce, 0xc1, 0xb6, 0x99, 0xd3, 0xae, 0xca, 0x0f, 0xbb, 0xb2, 0xd3, 0xdd, 0xb9, 0x5f, 0xbd, 0xee, 0xd3, 0xce, 0xa6, 0x34, 0xc0, 0x54, 0xd3, 0x6b, 0x92, 0x5c, 0xc1, 0x7b, 0xd4, 0x60, 0x7d, 0xa7, 0xc3, 0x3f, 0xd5, 0x9c, 0x68, 0x05, 0xc4, 0xde, 0xd7, 0x24, 0x53, 0xdb, 0xc4, 0x40, 0xd7, 0x79, 0x3f, 0x1e, 0xc4, 0x8e, 0xd5, 0xc0, 0x2f, 0x12, 0x4c, 0x3d, 0x93, 0x7b, 0xe4, 0x75, 0x5d, 0x03, 0x98, 0x5c, 0xe3, 0x9c, 0x6b, 0x7d, 0xa0, 0x60, 0xe5, 0x37, 0x7d, 0xb8, 0xa7, 0x50, 0xe4, 0x31, 0x8b, 0x64, 0xac, 0xa9, 0xde, 0x54, 0x9b, 0x2d, 0xb4, 0x96, 0xd9, 0x53, 0xa9, 0xd8, 0xbb, 0xc2, 0xd4, 0xcb, 0xbb, 0x0e, 0xc2, 0xcf, 0xce, 0x4d, 0xc9, 0xae, 0xc9, 0xae, 0xca, 0x3d, 0xc9, 0x6c, 0xca, 0x9c, 0xb7, 0xbe, 0xca, 0xe8, 0xca, 0xf5, 0xa4, 0x9d, 0xcc, 0x61, 0xcb, 0xa2, 0x91, 0x50, 0xcd, 0x59, 0xcc, 0xe2, 0x7d, 0xf4, 0xce, 0x7c, 0xce, 0xb4, 0x6a, 0x57, 0xce, 0xe3, 0xd0, 0x78, 0x58, 0x0d, 0xcf, 0xb1, 0xd2, 0x35, 0x45, 0xf1, 0xd0, 0xa4, 0xd3, 0x7a, 0x33, 0x80, 0x5b, 0x98, 0x86, 0x8f, 0xe6, 0x24, 0x6a, 0x92, 0x8a, 0xe6, 0xe4, 0x05, 0x7f, 0xe1, 0x93, 0x5f, 0xe6, 0x2e, 0x8d, 0xd8, 0x9c, 0x6d, 0xe5, 0xe4, 0x9e, 0x3b, 0xa3, 0xe7, 0xe1, 0x1e, 0xad, 0xb3, 0xab, 0x34, 0xdb, 0x25, 0xba, 0xb5, 0xb2, 0xce, 0xd6, 0x47, 0xc8, 0x2e, 0xb9, 0xa5, 0xce, 0x64, 0xd0, 0x5e, 0xbf, 0xb5, 0xc7, 0x72, 0xd5, 0xab, 0xc1, 
0x31, 0xb7, 0x20, 0xd7, 0x44, 0xc1, 0x80, 0xa4, 0x18, 0xd7, 0x82, 0xc1, 0xa1, 0x90, 0xc5, 0xd7, 0xfe, 0xc2, 0x42, 0x7d, 0x36, 0xd6, 0x4e, 0xc1, 0x0d, 0x68, 0x20, 0xd4, 0x81, 0xc0, 0xc3, 0x53, 0xc7, 0xd2, 0xd7, 0xbf, 0xfc, 0x3f, 0x1f, 0xd1, 0xe5, 0xc2, 0x6a, 0x30, 0xd1, 0x6d, 0xdc, 0x7a, 0x75, 0xe8, 0x41, 0x82, 0x73, 0x81, 0x8d, 0xe7, 0x22, 0x8a, 0x88, 0x89, 0x3b, 0xe6, 0xa4, 0x9d, 0xad, 0x92, 0xb5, 0xe6, 0xb5, 0xb1, 0x31, 0x9d, 0x17, 0xe4, 0xb9, 0xbc, 0x79, 0xa3, 0x6e, 0xdf, 0x64, 0xcb, 0x6f, 0xab, 0x6d, 0xd9, 0xd0, 0xd2, 0x76, 0xb2, 0x15, 0xd3, 0x73, 0xdc, 0x44, 0xb7, 0x05, 0xc8, 0xc4, 0xde, 0xda, 0xb5, 0xe8, 0xb5, 0x93, 0xdd, 0x2e, 0xb3, 0x84, 0xa0, 0x21, 0xd9, 0xf0, 0xaf, 0xdd, 0x89, 0x94, 0xd8, 0x17, 0xad, 0x65, 0x74, 0x78, 0xd6, 0x15, 0xab, 0xb3, 0x5f, 0x82, 0xd4, 0xeb, 0xab, 0xed, 0x4a, 0xd5, 0xd3, 0xdf, 0xab, 0xd2, 0x36, 0x9a, 0xd3, 0x0a, 0xb2, 0x5d, 0x30, 0x06, 0x81, 0xc0, 0x71, 0x59, 0xeb, 0x69, 0x8b, 0x07, 0x77, 0xe6, 0xeb, 0x84, 0x98, 0x1e, 0x7e, 0x4c, 0xea, 0x1c, 0xae, 0x51, 0x8a, 0x80, 0xe9, 0xa1, 0xc0, 0x8f, 0x95, 0x2f, 0xeb, 0x6d, 0xce, 0x25, 0x9d, 0xad, 0xe5, 0xff, 0xd5, 0xa0, 0xa1, 0xcd, 0xdc, 0x36, 0xd9, 0x64, 0xa7, 0x39, 0xd6, 0x0e, 0xde, 0xd0, 0xa7, 0x02, 0xc3, 0x0a, 0xde, 0x1b, 0xa3, 0x88, 0xac, 0x31, 0xdc, 0x40, 0xa0, 0x3f, 0x96, 0x51, 0xd9, 0x5a, 0x9c, 0x6b, 0x80, 0x51, 0xd7, 0xcb, 0x9a, 0xa0, 0x6b, 0x92, 0xd5, 0x50, 0x98, 0xa4, 0x56, 0x5a, 0xd4, 0x68, 0x99, 0x5d, 0x42, 0x57, 0xd3, 0x91, 0x9b, 0xef, 0x33, 0x50, 0xd3, 0x5a, 0xa1, 0x2f, 0x2e, 0x61, 0x8d, 0xfc, 0x63, 0xf7, 0xed, 0x65, 0x99, 0x7b, 0x6d, 0x10, 0xeb, 0x3f, 0xa8, 0x5c, 0x71, 0x5e, 0xea, 0x35, 0xb9, 0x9b, 0x7e, 0x8d, 0xe9, 0xbb, 0xcb, 0x95, 0x89, 0x06, 0xeb, 0x13, 0xd5, 0x41, 0x8e, 0x8e, 0xdf, 0x9f, 0xd7, 0x82, 0x93, 0x85, 0xd9, 0xfc, 0xdb, 0x08, 0x96, 0xed, 0xd0, 0x2c, 0xdc, 0xb0, 0x94, 0x9f, 0xb9, 0x42, 0xdc, 0xfa, 0x91, 0x3b, 0xa2, 0xc8, 0xdb, 0x03, 0x8d, 0xac, 0x8c, 0x4a, 0xd9, 0x5c, 0x8b, 0xd2, 0x78, 0x47, 0xd6, 0xd7, 0x89, 0x26, 0x62, 0x75, 0xd5, 0x8c, 0x88, 0x99, 0x4c, 0xcd, 
0xd4, 0x3e, 0x8a, 0x28, 0x38, 0x4a, 0xd3, 0x47, 0x8f, 0xf5, 0x30, 0xc8, 0xd3, 0xb7, 0x94, 0xd3, 0x2d, 0xcd, 0x9e, 0xbb, 0x5f, 0x2a, 0xec, 0x73, 0xa9, 0x83, 0x63, 0x02, 0xe9, 0x21, 0xb7, 0x30, 0x65, 0x6a, 0xe9, 0x08, 0xc4, 0xcb, 0x72, 0x04, 0xe8, 0x67, 0xd3, 0xae, 0x7b, 0xa3, 0xe8, 0x1a, 0xd6, 0xd0, 0x7e, 0x79, 0xdc, 0x60, 0xd8, 0x1d, 0x82, 0xce, 0xd8, 0xc4, 0xdb, 0xd2, 0x84, 0xdd, 0xcb, 0x41, 0xdc, 0xed, 0x80, 0x03, 0xb2, 0x13, 0xdd, 0x8a, 0x7d, 0xa3, 0x9b, 0xbf, 0xdb, 0xe7, 0x7a, 0x1e, 0x84, 0xb7, 0xda, 0xa3, 0x78, 0x60, 0x70, 0x2a, 0xd9, 0x46, 0x78, 0x1c, 0x5a, 0xed, 0xd7, 0xbe, 0x7a, 0xcc, 0x47, 0x86, 0xd6, 0xb1, 0x7d, 0x36, 0x34, 0xd4, 0xd4, 0xed, 0x84, 0x38, 0x30, 0x00, 0xd3, 0x9e, 0x8b, 0xf1, 0x2c, 0x49, 0xa8, 0x66, 0x4c, 0x4d, 0xed, 0x18, 0xb6, 0xd2, 0x5c, 0xd3, 0xe9, 0x42, 0xc2, 0xf5, 0x61, 0x73, 0xe9, 0xac, 0xd1, 0x4d, 0x63, 0xa5, 0xe9, 0xa6, 0xd7, 0x6a, 0x67, 0xdb, 0xdf, 0x89, 0xd7, 0xff, 0x6d, 0x07, 0xdb, 0xae, 0xd8, 0xd1, 0x71, 0x63, 0xd7, 0xbd, 0xdc, 0xb2, 0x70, 0xf1, 0xc5, 0xb6, 0xdd, 0xf7, 0x69, 0x57, 0xa8, 0x91, 0xdd, 0xdb, 0x67, 0x21, 0x92, 0x69, 0xdd, 0x56, 0x66, 0x32, 0x7f, 0x47, 0xda, 0xb2, 0x66, 0x6b, 0x69, 0xff, 0xde, 0x43, 0x63, 0xa9, 0x54, 0x36, 0xdd, 0xbe, 0x67, 0x5a, 0x3e, 0xb9, 0xd9, 0x7c, 0x6e, 0xcf, 0x33, 0x58, 0xd7, 0x76, 0x77, 0xc1, 0x2f, 0x09, 0xd5, 0x76, 0x80, 0x05, 0x2b, 0xca, 0xb2, 0xb5, 0x39, 0xca, 0xef, 0x82, 0xc7, 0x7f, 0x4b, 0xcc, 0xed, 0x75, 0xcd, 0x74, 0x50, 0x8b, 0xeb, 0x72, 0xd7, 0x00, 0x55, 0xab, 0xde, 0x93, 0xd7, 0xc7, 0x5d, 0xc2, 0xdd, 0xa1, 0xd9, 0x2f, 0x5e, 0x8b, 0xdb, 0x78, 0xd9, 0xbd, 0x61, 0x96, 0xd7, 0xc4, 0xdd, 0x72, 0x5e, 0x91, 0xc1, 0x07, 0xde, 0xdd, 0x57, 0x2c, 0xa4, 0xa7, 0xde, 0x30, 0x53, 0xb7, 0x8d, 0xae, 0xdf, 0x0e, 0x50, 0xa9, 0x79, 0xee, 0xe1, 0xfa, 0x4a, 0xc2, 0x64, 0x54, 0xe0, 0x65, 0x49, 0xc3, 0x50, 0x35, 0xde, 0x52, 0x4a, 0xef, 0x37, 0xaf, 0xdc, 0x99, 0x60, 0x09, 0x32, 0x62, 0xd9, 0x1d, 0x67, 0x51, 0x2e, 0x8d, 0xd5, 0x63, 0x72, 0xac, 0x2b, 0x8b, 0xbb, 0xbd, 0x36, 0xe3, 0xea, 0xb6, 0xd2, 0x11, 0x3e, 
0x58, 0xf0, 0x52, 0xd8, 0x72, 0x3b, 0xd7, 0xe5, 0x11, 0xd8, 0xea, 0x49, 0x2d, 0xdd, 0xa3, 0xda, 0x19, 0x4c, 0xa0, 0xdc, 0x2e, 0xdb, 0xb7, 0x51, 0x5a, 0xda, 0xab, 0xdc, 0x0e, 0x56, 0x45, 0xd3, 0x15, 0xdf, 0x8e, 0x4a, 0x1c, 0xb9, 0xfe, 0xdd, 0xf1, 0x3c, 0x7b, 0x98, 0x44, 0xdd, 0x0f, 0x36, 0xb1, 0x82, 0xf8, 0xdc, 0x4d, 0x35, 0x33, 0x6f, 0xf0, 0xdb, 0x7a, 0x34, 0xba, 0x5d, 0xa1, 0xda, 0xee, 0x35, 0x8a, 0x4b, 0x5a, 0xd8, 0x00, 0x32, 0x8a, 0x33, 0xd3, 0xd4, 0xfe, 0x32, 0x03, 0x30, 0xe3, 0xd8, 0x10, 0x5b, 0x72, 0x2e, 0x8e, 0xd6, 0x29, 0x61, 0xf6, 0x2c, 0x64, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0xa8, 0x80, 0xff, 0xff, 0xb1, 0x8f, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0xa8, 0x80, 0xff, 0xff, 0xb1, 0x8f, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0xa8, 0x80, 0xff, 0xff, 0xb1, 0x8f, 0x6d, 0x42, 0x41, 0x20, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x98, 0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x01, 0x40, 0x00, 0x00, 0x01, 0x84, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x97, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xfe, 0x85, 0x60, 0xff, 0xff, 0xb3, 0xfa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbe, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0xad, 0x0c, 0x00, 0x00, 0x1b, 0xb0, 0x00, 0x00, 0x0d, 0xaa, 0x00, 0x00, 0x14, 0x7b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0xaf, 0x2a, 0x00, 0x00, 0x1c, 0x07, 0x00, 0x00, 0x0e, 0x2c, 0x00, 0x00, 0x14, 0x7b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x47, 0x00, 0x00, 0x1a, 0x49, 0x00, 0x00, 0x0b, 0xb0, 0x00, 0x00, 0x14, 0x7b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x66, 0xf0, 0x55, 0x90, 0x22, 0x59, 0x4e, 0xfa, 0x58, 0x7c, 0xf5, 0x33, 0x17, 0xf7, 0xfd, 0x13, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd2, 0xdb, 0xff, 0xff, 0x00, 0x00, 0x2d, 0x24, 0xe8, 0x08, 0x02, 0xec, 0xff, 0xff, 0xb1, 0x05, 0xa7, 0x83, 0x0a, 0xcb, 0x99, 0x0f, 0xaa, 0x6f, 0xdd, 0xa6, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x6a, 0xab, 0x00, 0x0c, 0x12, 0x9c, 0xff, 0xfb, 0x21, 0x8d, 0x00, 0x89, 0x34, 0x39, 0x00, 0x00, 0x67, 0x52, 0xff, 0xff, 0xf1, 0xec, 0xff, 0xc8, 0xaa, 0x3d, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x6a, 0xab, 0x00, 0x06, 0xbe, 0x75, 0xff, 0xfd, 0xbb, 0x45, 0x00, 0x4c, 0xa4, 0xf7, 0x00, 0x00, 0x56, 0x3e, 0xff, 0xff, 0xf1, 0xec, 0xff, 0xe6, 0x38, 0x17, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x6a, 0xab, 0x00, 0x03, 0xee, 0x3c, 0xff, 0xff, 0x75, 0x4e, 0x00, 0x2c, 0xab, 0xba, 0x00, 0x00, 0x23, 
0x83, 0xff, 0xff, 0xf1, 0xec, 0xff, 0xf9, 0xd7, 0xc3, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x72, 0x6d, 0x67, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x6d, 0x6c, 0x75, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0c, 0x65, 0x6e, 0x55, 0x53, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x43, 0x00, 0x6f, 0x00, 0x70, 0x00, 0x79, 0x00, 0x72, 0x00, 0x69, 0x00, 0x67, 0x00, 0x68, 0x00, 0x74, 0x00, 0x20, 0x00, 0x32, 0x00, 0x30, 0x00, 0x30, 0x00, 0x37, 0x00, 0x20, 0x00, 0x49, 0x00, 0x6e, 0x00, 0x74, 0x00, 0x65, 0x00, 0x72, 0x00, 0x6e, 0x00, 0x61, 0x00, 0x74, 0x00, 0x69, 0x00, 0x6f, 0x00, 0x6e, 0x00, 0x61, 0x00, 0x6c, 0x00, 0x20, 0x00, 0x43, 0x00, 0x6f, 0x00, 0x6c, 0x00, 0x6f, 0x00, 0x72, 0x00, 0x20, 0x00, 0x43, 0x00, 0x6f, 0x00, 0x6e, 0x00, 0x73, 0x00, 0x6f, 0x00, 0x72, 0x00, 0x74, 0x00, 0x69, 0x00, 0x75, 0x00, 0x6d, 0x00, 0x00, 0x73, 0x66, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x0c, 0x4b, 0x00, 0x00, 0x05, 0xe4, 0xff, 0xff, 0xf3, 0x28, 0x00, 0x00, 0x07, 0x9c, 0x00, 0x00, 0xfd, 0x87, 0xff, 0xff, 0xfb, 0xa1, 0xff, 0xff, 0xfd, 0xa3, 0x00, 0x00, 0x02, 0xa2, 0x00, 0x00, 0xc0, 0x8c }; StringInfo *profile; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (GetImageProfile(image,"icm") != (const StringInfo *) NULL) return(MagickFalse); profile=AcquireStringInfo(sizeof(sRGBProfile)); SetStringInfoDatum(profile,sRGBProfile); status=SetImageProfile(image,"icm",profile); profile=DestroyStringInfo(profile); return(status); } #if defined(MAGICKCORE_LCMS_DELEGATE) #if defined(LCMS_VERSION) && (LCMS_VERSION >= 2000) static void LCMSExceptionHandler(cmsContext context,cmsUInt32Number severity, const char *message) { Image *image; (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s", severity,message != (char *) NULL ? 
message : "no message"); image=(Image *) context; if (image != (Image *) NULL) (void) ThrowMagickException(&image->exception,GetMagickModule(), ImageWarning,"UnableToTransformColorspace","`%s'",image->filename); } #else static int LCMSExceptionHandler(int severity,const char *message) { (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%d, %s", severity,message != (char *) NULL ? message : "no message"); return(1); } #endif #endif MagickExport MagickBooleanType ProfileImage(Image *image,const char *name, const void *datum,const size_t length, const MagickBooleanType magick_unused(clone)) { #define ProfileImageTag "Profile/Image" #define ThrowProfileException(severity,tag,context) \ { \ if (source_profile != (cmsHPROFILE) NULL) \ (void) cmsCloseProfile(source_profile); \ if (target_profile != (cmsHPROFILE) NULL) \ (void) cmsCloseProfile(target_profile); \ ThrowBinaryException(severity,tag,context); \ } MagickBooleanType status; StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(name != (const char *) NULL); if ((datum == (const void *) NULL) || (length == 0)) { char **arguments, *names; int number_arguments; register ssize_t i; /* Delete image profile(s). 
*/ names=ConstantString(name); (void) SubstituteString(&names,","," "); arguments=StringToArgv(names,&number_arguments); names=DestroyString(names); if (arguments == (char **) NULL) return(MagickTrue); ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { for (i=1; i < (ssize_t) number_arguments; i++) { if ((*arguments[i] == '!') && (LocaleCompare(name,arguments[i]+1) == 0)) break; if (GlobExpression(name,arguments[i],MagickTrue) != MagickFalse) { (void) DeleteImageProfile(image,name); ResetImageProfileIterator(image); break; } } name=GetNextImageProfile(image); } for (i=0; i < (ssize_t) number_arguments; i++) arguments[i]=DestroyString(arguments[i]); arguments=(char **) RelinquishMagickMemory(arguments); return(MagickTrue); } /* Add a ICC, IPTC, or generic profile to the image. */ status=MagickTrue; profile=AcquireStringInfo((size_t) length); SetStringInfoDatum(profile,(unsigned char *) datum); if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0)) status=SetImageProfile(image,name,profile); else { const StringInfo *icc_profile; icc_profile=GetImageProfile(image,"icc"); if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { const char *value; value=GetImageProperty(image,"exif:ColorSpace"); if (LocaleCompare(value,"1") != 0) (void) SetsRGBImageProfile(image); value=GetImageProperty(image,"exif:InteroperabilityIndex"); if (LocaleCompare(value,"R98.") != 0) (void) SetsRGBImageProfile(image); value=GetImageProperty(image,"exif:InteroperabilityIndex"); if (LocaleCompare(value,"R03.") != 0) (void) SetAdobeRGB1998ImageProfile(image); icc_profile=GetImageProfile(image,"icc"); } if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { profile=DestroyStringInfo(profile); return(MagickTrue); } #if !defined(MAGICKCORE_LCMS_DELEGATE) (void) ThrowMagickException(&image->exception,GetMagickModule(), 
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (LCMS)", image->filename); #else { cmsHPROFILE source_profile; /* Transform pixel colors as defined by the color profiles. */ cmsSetLogErrorHandler(LCMSExceptionHandler); source_profile=cmsOpenProfileFromMemTHR(image, GetStringInfoDatum(profile),(cmsUInt32Number) GetStringInfoLength(profile)); if (source_profile == (cmsHPROFILE) NULL) ThrowBinaryException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); if ((cmsGetDeviceClass(source_profile) != cmsSigLinkClass) && (icc_profile == (StringInfo *) NULL)) status=SetImageProfile(image,name,profile); else { CacheView *image_view; ColorspaceType source_colorspace, target_colorspace; cmsColorSpaceSignature signature; cmsHPROFILE target_profile; cmsHTRANSFORM *restrict transform; cmsUInt32Number flags, source_type, target_type; ExceptionInfo *exception; int intent; MagickBooleanType status; MagickOffsetType progress; size_t source_channels, target_channels; ssize_t y; unsigned short **restrict source_pixels, **restrict target_pixels; exception=(&image->exception); target_profile=(cmsHPROFILE) NULL; if (icc_profile != (StringInfo *) NULL) { target_profile=source_profile; source_profile=cmsOpenProfileFromMemTHR(image, GetStringInfoDatum(icc_profile),(cmsUInt32Number) GetStringInfoLength(icc_profile)); if (source_profile == (cmsHPROFILE) NULL) ThrowProfileException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } switch (cmsGetColorSpace(source_profile)) { case cmsSigCmykData: { source_colorspace=CMYKColorspace; source_type=(cmsUInt32Number) TYPE_CMYK_16; source_channels=4; break; } case cmsSigGrayData: { source_colorspace=GRAYColorspace; source_type=(cmsUInt32Number) TYPE_GRAY_16; source_channels=1; break; } case cmsSigLabData: { source_colorspace=LabColorspace; source_type=(cmsUInt32Number) TYPE_Lab_16; source_channels=3; break; } case cmsSigLuvData: { source_colorspace=YUVColorspace; source_type=(cmsUInt32Number) TYPE_YUV_16; 
source_channels=3; break; } case cmsSigRgbData: { source_colorspace=RGBColorspace; source_type=(cmsUInt32Number) TYPE_RGB_16; source_channels=3; break; } case cmsSigXYZData: { source_colorspace=XYZColorspace; source_type=(cmsUInt32Number) TYPE_XYZ_16; source_channels=3; break; } case cmsSigYCbCrData: { source_colorspace=YCbCrColorspace; source_type=(cmsUInt32Number) TYPE_YCbCr_16; source_channels=3; break; } default: { source_colorspace=UndefinedColorspace; source_type=(cmsUInt32Number) TYPE_RGB_16; source_channels=3; break; } } signature=cmsGetPCS(source_profile); if (target_profile != (cmsHPROFILE) NULL) signature=cmsGetColorSpace(target_profile); switch (signature) { case cmsSigCmykData: { target_colorspace=CMYKColorspace; target_type=(cmsUInt32Number) TYPE_CMYK_16; target_channels=4; break; } case cmsSigLabData: { target_colorspace=LabColorspace; target_type=(cmsUInt32Number) TYPE_Lab_16; target_channels=3; break; } case cmsSigGrayData: { target_colorspace=GRAYColorspace; target_type=(cmsUInt32Number) TYPE_GRAY_16; target_channels=1; break; } case cmsSigLuvData: { target_colorspace=YUVColorspace; target_type=(cmsUInt32Number) TYPE_YUV_16; target_channels=3; break; } case cmsSigRgbData: { target_colorspace=RGBColorspace; target_type=(cmsUInt32Number) TYPE_RGB_16; target_channels=3; break; } case cmsSigXYZData: { target_colorspace=XYZColorspace; target_type=(cmsUInt32Number) TYPE_XYZ_16; target_channels=3; break; } case cmsSigYCbCrData: { target_colorspace=YCbCrColorspace; target_type=(cmsUInt32Number) TYPE_YCbCr_16; target_channels=3; break; } default: { target_colorspace=UndefinedColorspace; target_type=(cmsUInt32Number) TYPE_RGB_16; target_channels=3; break; } } if ((source_colorspace == UndefinedColorspace) || (target_colorspace == UndefinedColorspace)) ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch", name); if ((source_colorspace == GRAYColorspace) && (IsGrayImage(image,exception) == MagickFalse)) 
ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch", name); if ((source_colorspace == CMYKColorspace) && (image->colorspace != CMYKColorspace)) ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch", name); if ((source_colorspace == XYZColorspace) && (image->colorspace != XYZColorspace)) ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch", name); if ((source_colorspace == YCbCrColorspace) && (image->colorspace != YCbCrColorspace)) ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch", name); if ((source_colorspace != CMYKColorspace) && (source_colorspace != GRAYColorspace) && (source_colorspace != LabColorspace) && (source_colorspace != XYZColorspace) && (source_colorspace != YCbCrColorspace) && (IsRGBColorspace(image->colorspace) == MagickFalse)) ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch", name); switch (image->rendering_intent) { case AbsoluteIntent: intent=INTENT_ABSOLUTE_COLORIMETRIC; break; case PerceptualIntent: intent=INTENT_PERCEPTUAL; break; case RelativeIntent: intent=INTENT_RELATIVE_COLORIMETRIC; break; case SaturationIntent: intent=INTENT_SATURATION; break; default: intent=INTENT_PERCEPTUAL; break; } flags=cmsFLAGS_HIGHRESPRECALC; #if defined(cmsFLAGS_BLACKPOINTCOMPENSATION) if (image->black_point_compensation != MagickFalse) flags|=cmsFLAGS_BLACKPOINTCOMPENSATION; #endif transform=AcquireTransformThreadSet(image,source_profile, source_type,target_profile,target_type,intent,flags); if (transform == (cmsHTRANSFORM *) NULL) ThrowProfileException(ImageError,"UnableToCreateColorTransform", name); /* Transform image as dictated by the source & target image profiles. 
*/ source_pixels=AcquirePixelThreadSet(image->columns,source_channels); target_pixels=AcquirePixelThreadSet(image->columns,target_channels); if ((source_pixels == (unsigned short **) NULL) || (target_pixels == (unsigned short **) NULL)) { transform=DestroyTransformThreadSet(transform); ThrowProfileException(ResourceLimitError, "MemoryAllocationFailed",image->filename); } if (SetImageStorageClass(image,DirectClass) == MagickFalse) { target_pixels=DestroyPixelThreadSet(target_pixels); source_pixels=DestroyPixelThreadSet(source_pixels); transform=DestroyTransformThreadSet(transform); if (source_profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(source_profile); if (target_profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_profile); return(MagickFalse); } if (target_colorspace == CMYKColorspace) (void) SetImageColorspace(image,target_colorspace); status=MagickTrue; progress=0; image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; register unsigned short *p; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); p=source_pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) { *p++=ScaleQuantumToShort(GetPixelRed(q)); if (source_channels > 1) { *p++=ScaleQuantumToShort(GetPixelGreen(q)); *p++=ScaleQuantumToShort(GetPixelBlue(q)); } if (source_channels > 3) *p++=ScaleQuantumToShort(GetPixelIndex(indexes+x)); q++; } cmsDoTransform(transform[id],source_pixels[id],target_pixels[id], (unsigned int) image->columns); p=target_pixels[id]; q-=image->columns; for (x=0; x < (ssize_t) image->columns; x++) { 
SetPixelRed(q,ScaleShortToQuantum(*p)); SetPixelGreen(q,GetPixelRed(q)); SetPixelBlue(q,GetPixelRed(q)); p++; if (target_channels > 1) { SetPixelGreen(q,ScaleShortToQuantum(*p)); p++; SetPixelBlue(q,ScaleShortToQuantum(*p)); p++; } if (target_channels > 3) { SetPixelIndex(indexes+x,ScaleShortToQuantum(*p)); p++; } q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ProfileImage) #endif proceed=SetImageProgress(image,ProfileImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); (void) SetImageColorspace(image,target_colorspace); switch (signature) { case cmsSigRgbData: { image->type=image->matte == MagickFalse ? TrueColorType : TrueColorMatteType; break; } case cmsSigCmykData: { image->type=image->matte == MagickFalse ? ColorSeparationType : ColorSeparationMatteType; break; } case cmsSigGrayData: { image->type=image->matte == MagickFalse ? GrayscaleType : GrayscaleMatteType; break; } default: break; } target_pixels=DestroyPixelThreadSet(target_pixels); source_pixels=DestroyPixelThreadSet(source_pixels); transform=DestroyTransformThreadSet(transform); if (cmsGetDeviceClass(source_profile) != cmsSigLinkClass) status=SetImageProfile(image,name,profile); if (target_profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_profile); } (void) cmsCloseProfile(source_profile); } #endif } profile=DestroyStringInfo(profile); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m o v e I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemoveImageProfile() removes a named profile from the image and returns its % value. 
%
%  The format of the RemoveImageProfile method is:
%
%      void *RemoveImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  /*
    Detach the named profile from the image's profile tree and hand
    ownership of the StringInfo back to the caller; NULL when the image
    has no profile tree (or no such profile).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  if (LocaleCompare(name,"icc") == 0)
    {
      /*
        Continue to support deprecated color profile for now.
      */
      image->color_profile.length=0;
      image->color_profile.info=(unsigned char *) NULL;
    }
  else
    if (LocaleCompare(name,"iptc") == 0)
      {
        /*
          Continue to support deprecated IPTC profile for now.
        */
        image->iptc_profile.length=0;
        image->iptc_profile.info=(unsigned char *) NULL;
      }
  return((StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
    image->profiles,name));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t P r o f i l e I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImageProfileIterator() resets the image profile iterator.  Use it in
%  conjunction with GetNextImageProfile() to iterate over all the profiles
%  associated with an image.
%
%  The format of the ResetImageProfileIterator method is:
%
%      ResetImageProfileIterator(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
  /*
    Rewind the profile splay-tree iterator so that GetNextImageProfile()
    starts again from the first profile.  A no-op when the image has no
    profile tree.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles != (SplayTreeInfo *) NULL)
    ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e P r o f i l e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageProfile() adds a named profile to the image.  If a profile with the
%  same name already exists, it is replaced.  This method differs from the
%  ProfileImage() method in that it does not apply CMS color profiles.
%
%  The format of the SetImageProfile method is:
%
%      MagickBooleanType SetImageProfile(Image *image,const char *name,
%        const StringInfo *profile)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name, for example icc, exif, and 8bim (8bim is the
%      Photoshop wrapper for iptc profiles).
%
%    o profile: A StringInfo structure that contains the named profile.
% */ static void *DestroyProfile(void *profile) { return((void *) DestroyStringInfo((StringInfo *) profile)); } static inline const unsigned char *ReadResourceByte(const unsigned char *p, unsigned char *quantum) { *quantum=(*p++); return(p); } static inline const unsigned char *ReadResourceBytes(const unsigned char *p, const ssize_t count,unsigned char *quantum) { register ssize_t i; for (i=0; i < count; i++) *quantum++=(*p++); return(p); } static inline const unsigned char *ReadResourceLong(const unsigned char *p, size_t *quantum) { *quantum=(size_t) (*p++ << 24); *quantum|=(size_t) (*p++ << 16); *quantum|=(size_t) (*p++ << 8); *quantum|=(size_t) (*p++ << 0); return(p); } static inline const unsigned char *ReadResourceShort(const unsigned char *p, unsigned short *quantum) { *quantum=(unsigned short) (*p++ << 8); *quantum|=(unsigned short) (*p++ << 0); return(p); } static MagickBooleanType GetProfilesFromResourceBlock(Image *image, const StringInfo *resource_block) { const unsigned char *datum; register const unsigned char *p; size_t length; StringInfo *profile; unsigned char length_byte; size_t count; unsigned short id; datum=GetStringInfoDatum(resource_block); length=GetStringInfoLength(resource_block); for (p=datum; p < (datum+length-16); ) { if (LocaleNCompare((char *) p,"8BIM",4) != 0) break; p+=4; p=ReadResourceShort(p,&id); p=ReadResourceByte(p,&length_byte); p+=length_byte; if (((length_byte+1) & 0x01) != 0) p++; if (p > (datum+length-4)) break; p=ReadResourceLong(p,&count); if ((p > (datum+length-count)) || (count > length)) break; switch (id) { case 0x03ed: { unsigned short resolution; /* Resolution. 
*/ p=ReadResourceShort(p,&resolution)+6; image->x_resolution=(double) resolution; p=ReadResourceShort(p,&resolution)+6; image->y_resolution=(double) resolution; break; } case 0x0404: { /* IPTC Profile */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfile(image,"iptc",profile); profile=DestroyStringInfo(profile); p+=count; break; } case 0x040c: { /* Thumbnail. */ p+=count; break; } case 0x040f: { /* ICC Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfile(image,"icc",profile); profile=DestroyStringInfo(profile); p+=count; break; } case 0x0422: { /* EXIF Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfile(image,"exif",profile); profile=DestroyStringInfo(profile); p+=count; break; } case 0x0424: { /* XMP Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfile(image,"xmp",profile); profile=DestroyStringInfo(profile); p+=count; break; } default: { p+=count; break; } } if ((count & 0x01) != 0) p++; } return(MagickTrue); } MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name, const StringInfo *profile) { char key[MaxTextExtent], property[MaxTextExtent]; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, DestroyProfile); (void) CopyMagickString(key,name,MaxTextExtent); status=AddValueToSplayTree((SplayTreeInfo *) image->profiles, ConstantString(key),CloneStringInfo(profile)); if ((status != MagickFalse) && ((LocaleCompare(name,"icc") == 0) || (LocaleCompare(name,"icm") == 0))) { const StringInfo *icc_profile; /* Continue to support deprecated color profile member. 
*/ icc_profile=GetImageProfile(image,name); if (icc_profile != (const StringInfo *) NULL) { image->color_profile.length=GetStringInfoLength(icc_profile); image->color_profile.info=GetStringInfoDatum(icc_profile); } } if ((status != MagickFalse) && ((LocaleCompare(name,"iptc") == 0) || (LocaleCompare(name,"8bim") == 0))) { const StringInfo *iptc_profile; /* Continue to support deprecated IPTC profile member. */ iptc_profile=GetImageProfile(image,name); if (iptc_profile != (const StringInfo *) NULL) { image->iptc_profile.length=GetStringInfoLength(iptc_profile); image->iptc_profile.info=GetStringInfoDatum(iptc_profile); } (void) GetProfilesFromResourceBlock(image,profile); } /* Inject profile into image properties. */ (void) FormatLocaleString(property,MaxTextExtent,"%s:sans",name); (void) GetImageProperty(image,property); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImageProfiles() synchronizes image properties with the image profiles. % Currently we only support updating the EXIF resolution and orientation. % % The format of the SyncImageProfiles method is: % % MagickBooleanType SyncImageProfiles(Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ static inline int ReadProfileByte(unsigned char **p,size_t *length) { int c; if (*length < 1) return(EOF); c=(int) (*(*p)++); (*length)--; return(c); } static inline unsigned short ReadProfileShort(const EndianType endian, unsigned char *buffer) { unsigned short value; if (endian == MSBEndian) { value=(unsigned short) ((((unsigned char *) buffer)[0] << 8) | ((unsigned char *) buffer)[1]); return((unsigned short) (value & 0xffff)); } value=(unsigned short) ((buffer[1] << 8) | buffer[0]); return((unsigned short) (value & 0xffff)); } static inline size_t ReadProfileLong(const EndianType endian, unsigned char *buffer) { size_t value; if (endian == MSBEndian) { value=(size_t) ((buffer[0] << 24) | (buffer[1] << 16) | (buffer[2] << 8) | buffer[3]); return((size_t) (value & 0xffffffff)); } value=(size_t) ((buffer[3] << 24) | (buffer[2] << 16) | (buffer[1] << 8 ) | (buffer[0])); return((size_t) (value & 0xffffffff)); } static inline void WriteProfileLong(const EndianType endian, const size_t value,unsigned char *p) { unsigned char buffer[4]; if (endian == MSBEndian) { buffer[0]=(unsigned char) (value >> 24); buffer[1]=(unsigned char) (value >> 16); buffer[2]=(unsigned char) (value >> 8); buffer[3]=(unsigned char) value; (void) CopyMagickMemory(p,buffer,4); return; } buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); buffer[2]=(unsigned char) (value >> 16); buffer[3]=(unsigned char) (value >> 24); (void) CopyMagickMemory(p,buffer,4); } static void WriteProfileShort(const EndianType endian, const unsigned short value,unsigned char *p) { unsigned char buffer[2]; if (endian == MSBEndian) { buffer[0]=(unsigned char) (value >> 8); buffer[1]=(unsigned char) value; (void) CopyMagickMemory(p,buffer,2); return; } buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); (void) CopyMagickMemory(p,buffer,2); } MagickExport MagickBooleanType SyncImageProfiles(Image *image) { #define MaxDirectoryStack 16 #define EXIF_DELIMITER "\n" #define 
EXIF_NUM_FORMATS 12 #define TAG_EXIF_OFFSET 0x8769 #define TAG_INTEROP_OFFSET 0xa005 typedef struct _DirectoryInfo { unsigned char *directory; size_t entry; } DirectoryInfo; DirectoryInfo directory_stack[MaxDirectoryStack]; EndianType endian; size_t entry, length, number_entries; SplayTreeInfo *exif_resources; ssize_t id, level, offset; static int format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8}; StringInfo *profile; unsigned char *directory, *exif; /* Set EXIF resolution tag. */ profile=(StringInfo *) GetImageProfile(image,"EXIF"); if (profile == (StringInfo *) NULL) return(MagickTrue); length=GetStringInfoLength(profile); exif=GetStringInfoDatum(profile); while (length != 0) { if (ReadProfileByte(&exif,&length) != 0x45) continue; if (ReadProfileByte(&exif,&length) != 0x78) continue; if (ReadProfileByte(&exif,&length) != 0x69) continue; if (ReadProfileByte(&exif,&length) != 0x66) continue; if (ReadProfileByte(&exif,&length) != 0x00) continue; if (ReadProfileByte(&exif,&length) != 0x00) continue; break; } if (length < 16) return(MagickFalse); id=(ssize_t) ReadProfileShort(LSBEndian,exif); endian=LSBEndian; if (id == 0x4949) endian=LSBEndian; else if (id == 0x4D4D) endian=MSBEndian; else return(MagickFalse); if (ReadProfileShort(endian,exif+2) != 0x002a) return(MagickFalse); /* This the offset to the first IFD. */ offset=(ssize_t) ((int) ReadProfileLong(endian,exif+4)); if ((offset < 0) || ((size_t) offset >= length)) return(MagickFalse); directory=exif+offset; level=0; entry=0; exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL, (void *(*)(void *)) NULL,(void *(*)(void *)) NULL); do { if (level > 0) { level--; directory=directory_stack[level].directory; entry=directory_stack[level].entry; } /* Determine how many entries there are in the current IFD. 
*/ number_entries=ReadProfileShort(endian,directory); for ( ; entry < number_entries; entry++) { int components; register unsigned char *p, *q; size_t number_bytes; ssize_t format, tag_value; q=(unsigned char *) (directory+2+(12*entry)); if (GetValueFromSplayTree(exif_resources,q) == q) break; (void) AddValueToSplayTree(exif_resources,q,q); tag_value=(ssize_t) ReadProfileShort(endian,q); format=(ssize_t) ReadProfileShort(endian,q+2); if ((format-1) >= EXIF_NUM_FORMATS) break; components=(int) ReadProfileLong(endian,q+4); number_bytes=(size_t) components*format_bytes[format]; if (number_bytes <= 4) p=q+8; else { ssize_t offset; /* The directory entry contains an offset. */ offset=(ssize_t) ((int) ReadProfileLong(endian,q+8)); if ((offset+number_bytes) < offset) continue; /* prevent overflow */ if ((size_t) (offset+number_bytes) > length) continue; p=(unsigned char *) (exif+offset); } switch (tag_value) { case 0x011a: { (void) WriteProfileLong(endian,(size_t) (image->x_resolution+0.5),p); (void) WriteProfileLong(endian,1UL,p+4); break; } case 0x011b: { (void) WriteProfileLong(endian,(size_t) (image->y_resolution+0.5),p); (void) WriteProfileLong(endian,1UL,p+4); break; } case 0x0112: { (void) WriteProfileShort(endian,(unsigned short) image->orientation, p); break; } case 0x0128: { (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p); break; } default: break; } if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET)) { ssize_t offset; offset=(ssize_t) ((int) ReadProfileLong(endian,p)); if (((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { directory_stack[level].directory=directory; entry++; directory_stack[level].entry=entry; level++; directory_stack[level].directory=exif+offset; directory_stack[level].entry=0; level++; if ((directory+2+(12*number_entries)) > (exif+length)) break; offset=(ssize_t) ((int) ReadProfileLong(endian,directory+2+(12* number_entries))); if ((offset != 0) && ((size_t) offset < length) && (level < 
(MaxDirectoryStack-2))) { directory_stack[level].directory=exif+offset; directory_stack[level].entry=0; level++; } } break; } } } while (level > 0); exif_resources=DestroySplayTree(exif_resources); return(MagickTrue); }
tree.c
#include "tree.h"
#include "io.h"
#include "sort.h"
#include <omp.h>
#include <stdio.h>
#include <string.h>

/**
 * @brief Instantiate a new node of the tree.
 *
 * @param key The key of the node
 * @param value The value of the node
 * @param parent The parent of the node in the tree
 * @return Pointer to the node created (never NULL; allocation is asserted)
 */
TreeNode *tree_node_new(int key, int value, int parent) {
  TreeNode *node = (TreeNode *)malloc(sizeof(TreeNode));
  assert(node != NULL);
  node->key = key;
  node->value = value;
  node->parent = parent;
  node->adj = hashmap_new();
  assert(node->adj != NULL);
  return node;
}

/**
 * @brief Free a tree node and its adjacency map.
 *
 * @param node Pointer to the node to free (NULL is a no-op)
 */
void tree_node_free(TreeNode *node) {
  if (node != NULL) {
    hashmap_free(node->adj);
    free(node);
  }
}

/**
 * @brief Instantiate a new tree containing only the sentinel root node.
 *
 * @return The new tree
 */
Tree tree_new() {
  TreeNode *node = tree_node_new(TREE_NODE_NULL, -1, 0);
  Tree tree = NULL;
  cvector_push_back(tree, node);
  return tree;
}

/**
 * @brief Free the tree and every node it owns.
 *
 * @param tree Pointer to the tree to free; *tree is set to NULL
 */
void tree_free(Tree *tree) {
  if (*tree != NULL) {
    int n_nodes = cvector_size((*tree));
    int i;
    for (i = 0; i < n_nodes; i++) {
      tree_node_free((*tree)[i]);
    }
    cvector_free((*tree));
    *tree = NULL;
  }
}

/**
 * @brief Add a node to the tree and register it in its parent's adjacency.
 *
 * @param tree Pointer to the tree
 * @param node Pointer to the node to add (its `parent` field must already be
 *             a valid id inside the tree)
 * @return The id of the node in the tree
 */
int tree_add_node(Tree *tree, TreeNode *node) {
  cvector_push_back((*tree), node);
  assert(*tree != NULL); // the (re)allocation inside push_back has not failed
  int new_id = cvector_size((*tree)) - 1;
  TreeNode *parent = (*tree)[node->parent];
  hashmap_put(parent->adj, &(node->key), sizeof(int), new_id);
  assert(new_id != node->parent); // a node can never be its own parent
  return new_id;
}

/**
 * @brief Add the subtree rooted in the ns(th) node of the tree
 * source as a child of the nd(th) node of the tree dest. The
 * source tree is modified, as the nodes are moved to the
 * destination tree.
 *
 * @param dest Pointer to the destination tree
 * @param source Pointer to the source tree
 * @param nd Id of the node in the destination tree
 * @param ns Id of the node in the source tree
 */
void tree_add_subtree(Tree *dest, Tree source, int nd, int ns) {
  // detach the children list, then re-home the node itself under nd
  cvector_vector_type(hashmap_element) neighbours = NULL;
  hashmap_get_elements(source[ns]->adj, &neighbours);
  hashmap_free(source[ns]->adj);
  source[ns]->adj = hashmap_new();
  source[ns]->parent = nd;
  int new_pos = tree_add_node(dest, source[ns]);
  int num_adj_s = cvector_size(neighbours);
  // recursively move the children under the node's new position
  int i;
  for (i = 0; i < num_adj_s; i++) {
    assert(neighbours[i].value != ns);
    tree_add_subtree(dest, source, new_pos, neighbours[i].value);
  }
  source[ns] = NULL; // ownership transferred to dest
  cvector_free(neighbours);
}

/**
 * @brief Merge the subtree of dest rooted in node with id nd
 * with the subtree of source rooted in ns and store the result in dest.
 * Also the source tree is modified.
 *
 * @param dest The destination tree
 * @param source The source tree
 * @param nd Id of the node in the destination tree
 * @param ns Id of the node in the source tree
 */
void tree_merge_dfs(Tree *dest, Tree source, int nd, int ns) {
  int i;
  cvector_vector_type(hashmap_element) neighbours = NULL;
  hashmap_get_elements(source[ns]->adj, &neighbours);
  int num_adj_s = cvector_size(neighbours);
  // foreach neighbour of node ns in source
  for (i = 0; i < num_adj_s; i++) {
    int source_pos = neighbours[i].value;
    assert(ns != neighbours[i].value);
    // if a node with the same key(item) is already present in the
    // children of nd, just increment the counter
    int dest_pos;
    if (hashmap_get((*dest)[nd]->adj, neighbours[i].key, sizeof(int),
                    &dest_pos) == MAP_OK) {
      (*dest)[dest_pos]->value += source[source_pos]->value;
      tree_merge_dfs(dest, source, dest_pos, source_pos);
    } else {
      // otherwise add the child and the subtree rooted in it to
      // the node nd in dest
      tree_add_subtree(dest, source, nd, source_pos);
    }
  }
  cvector_free(neighbours);
}

/**
 * @brief Merge the trees dest and source and store the result in dest.
 * The source tree is modified. It is a wrapper for @see tree_merge_dfs()
 *
 * @param dest The destination tree
 * @param source The source tree
 */
void tree_merge(Tree *dest, Tree source) { tree_merge_dfs(dest, source, 0, 0); }

/**
 * @brief Inserts into the vector nodes the nodes to send
 *
 * @param tree The trees from which to get the nodes
 * @param nodes The vector in which the nodes are put
 */
void tree_get_nodes(Tree tree, cvector_vector_type(TreeNodeToSend) * nodes) {
  int num_nodes = cvector_size(tree);
  for (int i = 0; i < num_nodes; i++) {
    TreeNodeToSend node;
    node.key = tree[i]->key;
    node.value = tree[i]->value;
    node.parent = tree[i]->parent;
    cvector_push_back((*nodes), node);
  }
}

/**
 * @brief Print the tree (one line per node, plus its adjacency map)
 *
 * @param tree The tree to print
 */
void tree_print(Tree tree) {
  int n_nodes = cvector_size(tree);
  for (int i = 0; i < n_nodes; i++) {
    printf("Node (%d: %d)\n", tree[i]->key, tree[i]->value);
    hashmap_print(tree[i]->adj);
  }
}

/**
 * @brief Build a tree given a transaction
 *
 * The transaction is consumed (freed) by this function.
 *
 * @param rank The rank of the process
 * @param world_size The number of processes in the world
 * @param transaction The transaction
 * @param index_map The map from item to the corresponding id
 * @param items_count The array of hashmap elements having the item string as a
 * key and the support count as a value
 * @param num_items The number of items in the sorted_indices array
 * @param sorted_indices The array of the sorted indices of the items
 * @return The built tree
 */
Tree tree_build_from_transaction(int rank, int world_size,
                                 Transaction *transaction, IndexMap index_map,
                                 hashmap_element *items_count, int num_items,
                                 int *sorted_indices) {
  int n_items = cvector_size((*transaction));
  cvector_vector_type(hashmap_element) elements = NULL;
  for (int i = 0; i < n_items; i++) {
    int item_size = cvector_size((*transaction)[i]);
    hashmap_element element;
    // consider only items with support >= min_support (in index map)
    if (hashmap_get(index_map, (*transaction)[i], item_size,
                    &(element.value)) == MAP_OK) {
      element.key_length = item_size;
      memcpy(element.key, (*transaction)[i], item_size);
      cvector_push_back(elements, element);
    }
  }
  transaction_free(transaction);
  n_items = cvector_size(elements);
  int *transaction_sorted_indices = (int *)malloc(n_items * sizeof(int));
  assert(transaction_sorted_indices != NULL);
  sort(elements, n_items, transaction_sorted_indices, 0, n_items - 1, 1);
  Tree tree = tree_new();
  for (int i = 0; i < n_items; i++) {
    assert(transaction_sorted_indices[i] >= 0);
    assert(transaction_sorted_indices[i] < n_items);
    Item item = elements[transaction_sorted_indices[i]].key;
    int item_size = elements[transaction_sorted_indices[i]].key_length;
    int pos;
    // BUGFIX: the lookup used to live inside assert(); with NDEBUG the call
    // would be compiled out and `pos` would be read uninitialized.
    int lookup_rc = hashmap_get(index_map, item, item_size, &pos);
    assert(lookup_rc == MAP_OK);
    (void)lookup_rc;
    assert(pos >= 0);
    assert(pos < num_items);
    assert(sorted_indices[pos] >= 0);
    assert(sorted_indices[pos] < num_items);
    TreeNode *node = tree_node_new(sorted_indices[pos], 1, i);
    assert(node != NULL);
    // BUGFIX: tree_add_node() has the side effect of inserting the node;
    // calling it inside assert() would skip the insertion under NDEBUG and
    // the tree would never be built.
    int new_id = tree_add_node(&tree, node);
    assert(new_id == i + 1);
    (void)new_id;
  }
  cvector_free(elements);
  free(transaction_sorted_indices);
  return tree;
}

/**
 * @brief Build a tree given a list of transactions
 *
 * First, we build the trees for the single transactions.
 * Then, we merge them in a binary-tree-like fashion.
 *
 * @param rank The rank of the process
 * @param world_size The number of processes in the world
 * @param transactions
 * @param index_map The map from item to the corresponding id
 * @param items_count The array of hashmap elements having the item string as a
 * key and the support count as a value
 * @param num_items The number of items in the sorted_indices array
 * @param sorted_indices The array of the sorted indices of the items
 * @param num_threads The number of threads requested to perform the building
 * @return The built tree
 */
Tree tree_build_from_transactions(int rank, int world_size,
                                  TransactionsList transactions,
                                  IndexMap index_map,
                                  hashmap_element *items_count, int num_items,
                                  int *sorted_indices, int num_threads) {
  int n_transactions = cvector_size(transactions);
  Tree *trees = (Tree *)malloc(n_transactions * sizeof(Tree));
  assert(trees != NULL);
  int i, pow;
  // Each doubling of `pow` is one level of the reduction: at pow == 1 the
  // per-transaction trees are built; at pow > 1 pairs of subtrees are merged.
#pragma omp parallel default(none)                                           \
    shared(n_transactions, trees, rank, world_size, transactions, index_map, \
           items_count, num_items, sorted_indices) private(pow, i)           \
    num_threads(num_threads)
  for (pow = 1; pow < 2 * n_transactions; pow *= 2) {
    int start = pow == 1 ? 0 : pow / 2;
#pragma omp for schedule(runtime)
    for (i = start; i < n_transactions; i += pow) {
      if (pow > 1) {
        // at levels > 1, merge two subtrees
        tree_merge(&trees[i - pow / 2], trees[i]);
        tree_free(&(trees[i]));
      } else {
        // at first level, build the transaction trees
        trees[i] = tree_build_from_transaction(
            rank, world_size, &(transactions[i]), index_map, items_count,
            num_items, sorted_indices);
      }
    }
  }
  Tree res = trees[0]; // assumes n_transactions > 0 — TODO confirm callers
  free(trees);
  return res;
}
BlockOps.h
/*****************************************************************************
*
* Copyright (c) 2003-2018 by The University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Apache License, version 2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development 2012-2013 by School of Earth Sciences
* Development from 2014 by Centre for Geoscience Computing (GeoComp)
*
*****************************************************************************/

// Dense per-block helpers (copy, matrix-vector products, inversion, solve)
// used by Paso. Block sizes 2 and 3 are hand-unrolled; larger blocks are
// delegated to LAPACK/CBLAS when available, otherwise an exception is thrown.
// All matrices are stored column-major (see the mat[] index assignments).

#ifndef __PASO_BLOCKOPS_H__
#define __PASO_BLOCKOPS_H__

#include "Paso.h"
#include "PasoException.h"

#include <cstring> // memcpy

#ifdef ESYS_HAVE_LAPACK
#ifdef ESYS_MKL_LAPACK
#include <mkl_lapack.h>
#include <mkl_cblas.h>
#else
extern "C" {
#include <clapack.h>
#include <cblas.h>
}
#endif
#endif

namespace paso {

/// copies N doubles from V into R (memcpy semantics: regions must not
/// overlap). NOTE(review): the (void*) cast on V drops const needlessly;
/// memcpy's second parameter is already const void*.
inline void BlockOps_Cpy_N(dim_t N, double* R, const double* V)
{
    memcpy((void*)R, (void*)V, N*sizeof(double));
}

/// performs operation R=R-mat*V (V and R are not overlapping) - 2x2
inline void BlockOps_SMV_2(double* R, const double* mat, const double* V)
{
    // column-major 2x2: mat = [A11 A21 | A12 A22]
    const double S1 = V[0];
    const double S2 = V[1];
    const double A11 = mat[0];
    const double A12 = mat[2];
    const double A21 = mat[1];
    const double A22 = mat[3];
    R[0] -= A11 * S1 + A12 * S2;
    R[1] -= A21 * S1 + A22 * S2;
}

/// performs operation R=R-mat*V (V and R are not overlapping) - 3x3
inline void BlockOps_SMV_3(double* R, const double* mat, const double* V)
{
    // column-major 3x3: columns are (A11,A21,A31), (A12,A22,A32), (A13,A23,A33)
    const double S1 = V[0];
    const double S2 = V[1];
    const double S3 = V[2];
    const double A11 = mat[0];
    const double A21 = mat[1];
    const double A31 = mat[2];
    const double A12 = mat[3];
    const double A22 = mat[4];
    const double A32 = mat[5];
    const double A13 = mat[6];
    const double A23 = mat[7];
    const double A33 = mat[8];
    R[0] -= A11 * S1 + A12 * S2 + A13 * S3;
    R[1] -= A21 * S1 + A22 * S2 + A23 * S3;
    R[2] -= A31 * S1 + A32 * S2 + A33 * S3;
}

// thrown by the N-sized variants when no LAPACK backend was compiled in
#define PASO_MISSING_CLAPACK throw PasoException("You need to install a LAPACK version to enable operations on block sizes > 3.")

/// performs operation R=R-mat*V (V and R are not overlapping) - NxN
inline void BlockOps_SMV_N(dim_t N, double* R, const double* mat,
                           const double* V)
{
#ifdef ESYS_HAVE_LAPACK
    // y := -1*A*x + 1*y
    cblas_dgemv(CblasColMajor,CblasNoTrans, N, N, -1., mat, N, V, 1, 1., R, 1);
#else
    PASO_MISSING_CLAPACK;
#endif
}

/// performs operation R=mat*V (V and R are not overlapping) - NxN
inline void BlockOps_MV_N(dim_t N, double* R, const double* mat,
                          const double* V)
{
#ifdef ESYS_HAVE_LAPACK
    // y := 1*A*x + 0*y
    cblas_dgemv(CblasColMajor,CblasNoTrans, N, N, 1., mat, N, V, 1, 0., R, 1);
#else
    PASO_MISSING_CLAPACK;
#endif
}

/// explicit 2x2 inverse via the adjugate; sets *failed=1 on a singular block
/// (invA left untouched in that case)
inline void BlockOps_invM_2(double* invA, const double* A, int* failed)
{
    const double A11 = A[0];
    const double A12 = A[2];
    const double A21 = A[1];
    const double A22 = A[3];
    double D = A11*A22-A12*A21;
    if (std::abs(D) > 0) {
        D = 1./D;
        invA[0] =  A22*D;
        invA[1] = -A21*D;
        invA[2] = -A12*D;
        invA[3] =  A11*D;
    } else {
        *failed = 1;
    }
}

/// explicit 3x3 inverse via cofactor expansion; sets *failed=1 on a singular
/// block (invA left untouched in that case)
inline void BlockOps_invM_3(double* invA, const double* A, int* failed)
{
    const double A11 = A[0];
    const double A21 = A[1];
    const double A31 = A[2];
    const double A12 = A[3];
    const double A22 = A[4];
    const double A32 = A[5];
    const double A13 = A[6];
    const double A23 = A[7];
    const double A33 = A[8];
    double D = A11*(A22*A33-A23*A32) + A12*(A31*A23-A21*A33)
             + A13*(A21*A32-A31*A22);
    if (std::abs(D) > 0) {
        D = 1./D;
        invA[0] = (A22*A33-A23*A32)*D;
        invA[1] = (A31*A23-A21*A33)*D;
        invA[2] = (A21*A32-A31*A22)*D;
        invA[3] = (A13*A32-A12*A33)*D;
        invA[4] = (A11*A33-A31*A13)*D;
        invA[5] = (A12*A31-A11*A32)*D;
        invA[6] = (A12*A23-A13*A22)*D;
        invA[7] = (A13*A21-A11*A23)*D;
        invA[8] = (A11*A22-A12*A21)*D;
    } else {
        *failed = 1;
    }
}

/// LU factorization of NxN matrix mat with partial pivoting.
/// mat is overwritten with the LU factors; pivot must hold N entries.
/// Sets *failed=1 if the factorization reports an error.
inline void BlockOps_invM_N(dim_t N, double* mat, index_t* pivot, int* failed)
{
#ifdef ESYS_HAVE_LAPACK
#ifdef ESYS_MKL_LAPACK
    int res = 0;
    dgetrf(&N, &N, mat, &N, pivot, &res);
    if (res != 0)
        *failed = 1;
#else
    int res = clapack_dgetrf(CblasColMajor, N, N, mat, N, pivot);
    if (res != 0)
        *failed = 1;
#endif // ESYS_MKL_LAPACK
#else
    PASO_MISSING_CLAPACK;
#endif
}

/// solves system of linear equations A*X=B using the LU factors and pivots
/// produced by BlockOps_invM_N; X holds B on entry, the solution on exit.
inline void BlockOps_solve_N(dim_t N, double* X, double* mat, index_t* pivot,
                             int* failed)
{
#ifdef ESYS_HAVE_LAPACK
#ifdef ESYS_MKL_LAPACK
    int res = 0;
    int ONE = 1;
    dgetrs("N", &N, &ONE, mat, &N, pivot, X, &N, &res);
    if (res != 0)
        *failed = 1;
#else
    int res = clapack_dgetrs(CblasColMajor, CblasNoTrans, N, 1, mat, N,
                             pivot, X, N);
    if (res != 0)
        *failed = 1;
#endif // ESYS_MKL_LAPACK
#else
    PASO_MISSING_CLAPACK;
#endif
}

/// inplace matrix vector product - order 2
inline void BlockOps_MViP_2(const double* mat, double* V)
{
    // V := mat*V; inputs are copied out first so the update is safe in place
    const double S1 = V[0];
    const double S2 = V[1];
    const double A11 = mat[0];
    const double A12 = mat[2];
    const double A21 = mat[1];
    const double A22 = mat[3];
    V[0] = A11 * S1 + A12 * S2;
    V[1] = A21 * S1 + A22 * S2;
}

/// inplace matrix vector product - order 3
inline void BlockOps_MViP_3(const double* mat, double* V)
{
    const double S1 = V[0];
    const double S2 = V[1];
    const double S3 = V[2];
    const double A11 = mat[0];
    const double A21 = mat[1];
    const double A31 = mat[2];
    const double A12 = mat[3];
    const double A22 = mat[4];
    const double A32 = mat[5];
    const double A13 = mat[6];
    const double A23 = mat[7];
    const double A33 = mat[8];
    V[0] = A11 * S1 + A12 * S2 + A13 * S3;
    V[1] = A21 * S1 + A22 * S2 + A23 * S3;
    V[2] = A31 * S1 + A32 * S2 + A33 * S3;
}

/// applies the (factorized) diagonal blocks D to all n block-rows of x in
/// parallel. For n_block==1 D holds reciprocals; for 2/3 it holds explicit
/// inverses; otherwise LU factors + pivots from BlockOps_invM_N.
inline void BlockOps_solveAll(dim_t n_block, dim_t n, double* D,
                              index_t* pivot, double* x)
{
    if (n_block == 1) {
#pragma omp parallel for
        for (dim_t i=0; i<n; ++i)
            x[i] *= D[i];
    } else if (n_block == 2) {
#pragma omp parallel for
        for (dim_t i=0; i<n; ++i)
            BlockOps_MViP_2(&D[4*i], &x[2*i]);
    } else if (n_block == 3) {
#pragma omp parallel for
        for (dim_t i=0; i<n; ++i)
            BlockOps_MViP_3(&D[9*i], &x[3*i]);
    } else {
        // NOTE(review): `failed` may be stored by several threads at once;
        // every writer stores 1, so the outcome is consistent, but it is
        // formally a data race — confirm acceptable for this build.
        int failed = 0;
#pragma omp parallel for
        for (dim_t i=0; i<n; ++i) {
            const dim_t block_size = n_block*n_block;
            BlockOps_solve_N(n_block, &x[n_block*i], &D[block_size*i],
                             &pivot[n_block*i], &failed);
        }
        if (failed > 0) {
            throw PasoException("BlockOps_solveAll: solution failed.");
        }
    }
}

} // namespace paso

#endif // __PASO_BLOCKOPS_H__
heated_plate_openmp.c
# include <stdlib.h>
# include <stdio.h>
# include <math.h>
# include <omp.h>

# define M 500
# define N 500

/* Globals shared by the OpenMP worker threads. */
double mean;          /* mean boundary temperature, initial interior guess */
double diff, my_diff; /* global / per-thread maximum change per sweep      */
double u[M][N];       /* previous iterate                                  */
double w[M][N];       /* current iterate                                   */

/*
  Purpose:
    Solves for the steady state temperature distribution over an M x N
    rectangular plate by Jacobi iteration, parallelized with OpenMP.
    Boundary values are fixed (100.0 on left, right and bottom edges,
    0.0 on the top edge); the interior starts at the mean boundary value
    and is relaxed until the largest per-point change drops below epsilon.

  Note:
    The original file also #included <mpi.h>, but no MPI routine was ever
    called; the unused dependency has been removed so the program builds
    with a plain OpenMP toolchain.
*/
int main ( int argc, char *argv[] )
{
  double epsilon = 0.001;
  int i, j;
  int iterations, iterations_print;
  double wtime;

  printf ( "\nHEATED_PLATE_OPENMP\n" );
  printf ( " C/OpenMP version\n" );
  printf ( " A program to solve for the steady state temperature distribution\n" );
  printf ( " over a rectangular plate.\n\n" );
  printf ( " Spatial grid of %d by %d points.\n", M, N );
  printf ( " The iteration will be repeated until the change is <= %e\n", epsilon );
  printf ( " Number of processors available = %d\n", omp_get_num_procs ( ) );
  printf ( " Number of threads = %d\n", omp_get_max_threads ( ) );
/*
  Set the boundary values, which don't change.
*/
  mean = 0.0;

#pragma omp parallel shared ( w ) private ( i, j )
  {
#pragma omp for
    for ( i = 1; i < M - 1; i++ )
    {
      w[i][0] = 100.0;
    }
#pragma omp for
    for ( i = 1; i < M - 1; i++ )
    {
      w[i][N-1] = 100.0;
    }
#pragma omp for
    for ( j = 0; j < N; j++ )
    {
      w[M-1][j] = 100.0;
    }
#pragma omp for
    for ( j = 0; j < N; j++ )
    {
      w[0][j] = 0.0;
    }
/*
  Average the boundary values, to come up with a reasonable
  initial value for the interior.
*/
#pragma omp for reduction ( + : mean )
    for ( i = 1; i < M - 1; i++ )
    {
      mean = mean + w[i][0] + w[i][N-1];
    }
#pragma omp for reduction ( + : mean )
    for ( j = 0; j < N; j++ )
    {
      mean = mean + w[M-1][j] + w[0][j];
    }
  }
/*
  OpenMP note:
  You cannot normalize MEAN inside the parallel region.  It
  only gets its correct value once you leave the parallel region.
  So we interrupt the parallel region, set MEAN, and go back in.
*/
  mean = mean / ( double ) ( 2 * M + 2 * N - 4 );
  printf ( "\n" );
  printf ( " MEAN = %f\n", mean );
/*
  Initialize the interior solution to the mean value.
*/
#pragma omp parallel shared ( mean, w ) private ( i, j )
  {
#pragma omp for
    for ( i = 1; i < M - 1; i++ )
    {
      for ( j = 1; j < N - 1; j++ )
      {
        w[i][j] = mean;
      }
    }
  }
/*
  iterate until the new solution W differs from the old solution U
  by no more than EPSILON.
*/
  iterations = 0;
  iterations_print = 1;
  printf ( "\n" );
  printf ( " Iteration Change\n" );
  printf ( "\n" );
  wtime = omp_get_wtime ( );

  diff = epsilon;

  while ( epsilon <= diff )
  {
# pragma omp parallel shared ( u, w ) private ( i, j )
    {
/*
  Save the old solution in U.
*/
# pragma omp for
      for ( i = 0; i < M; i++ )
      {
        for ( j = 0; j < N; j++ )
        {
          u[i][j] = w[i][j];
        }
      }
/*
  Determine the new estimate of the solution at the interior points.
  The new solution W is the average of north, south, east and west
  neighbors.
*/
# pragma omp for
      for ( i = 1; i < M - 1; i++ )
      {
        for ( j = 1; j < N - 1; j++ )
        {
          w[i][j] = ( u[i-1][j] + u[i+1][j] + u[i][j-1] + u[i][j+1] ) / 4.0;
        }
      }
    }
/*
  C and C++ cannot compute a maximum as a reduction operation.
  Therefore, we define a private variable MY_DIFF for each thread.
  Once they have all computed their values, we use a
  CRITICAL section to update DIFF.
*/
    diff = 0.0;
# pragma omp parallel shared ( diff, u, w ) private ( i, j, my_diff )
    {
      my_diff = 0.0;
# pragma omp for
      for ( i = 1; i < M - 1; i++ )
      {
        for ( j = 1; j < N - 1; j++ )
        {
          if ( my_diff < fabs ( w[i][j] - u[i][j] ) )
          {
            my_diff = fabs ( w[i][j] - u[i][j] );
          }
        }
      }
# pragma omp critical
      {
        if ( diff < my_diff )
        {
          diff = my_diff;
        }
      }
    }
    iterations++;
    if ( iterations == iterations_print )
    {
      printf ( " %8d %f\n", iterations, diff );
      iterations_print = 2 * iterations_print;
    }
  }
  wtime = omp_get_wtime ( ) - wtime;

  printf ( "\n" );
  printf ( " %8d %f\n", iterations, diff );
  printf ( "\n" );
  printf ( " Error tolerance achieved.\n" );
  printf ( " Wallclock time = %f\n", wtime );
/*
  Terminate.
*/
  printf ( "\n" );
  printf ( "HEATED_PLATE_OPENMP:\n" );
  printf ( " Normal end of execution.\n" );

  return 0;

# undef M
# undef N
}
GB_unaryop__minv_uint32_int16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__minv_uint32_int16
// op(A') function: GB_tran__minv_uint32_int16

// C type:   uint32_t
// A type:   int16_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 32)

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 32) ;

// casting (declares a temporary z of the output type)
#define GB_CASTING(z, x) \
    uint32_t z = (uint32_t) x ;

// cij = op (cast (aij)) — the braces scope the temporaries aij and x
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_uint32_int16
(
    uint32_t *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // element-wise apply over the anz entries, statically partitioned
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_uint32_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose loop body is shared template code, instantiated with the
    // GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Efficient_RANSAC.h
// Copyright (c) 2015 INRIA Sophia-Antipolis (France). // All rights reserved. // // This file is part of CGAL (www.cgal.org). // You can redistribute it and/or modify it under the terms of the GNU // General Public License as published by the Free Software Foundation, // either version 3 of the License, or (at your option) any later version. // // Licensees holding a valid commercial license may use this file in // accordance with the commercial license agreement provided with the software. // // This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE // WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. // // $URL$ // $Id$ // // // Author(s) : Sven Oesau, Yannick Verdie, Clément Jamin, Pierre Alliez // #ifndef CGAL_SHAPE_DETECTION_3_EFFICIENT_RANSAC_H #define CGAL_SHAPE_DETECTION_3_EFFICIENT_RANSAC_H #include <CGAL/Shape_detection_3/Octree.h> #include <CGAL/Shape_detection_3/Shape_base.h> #include <CGAL/Random.h> //for octree ------------------------------ #include <boost/iterator/filter_iterator.hpp> #include <CGAL/bounding_box.h> #include <CGAL/Iterator_range.h> //---------- #include <vector> #include <cmath> #include <limits> #include <fstream> #include <sstream> //boost -------------- #include <boost/iterator/counting_iterator.hpp> #include <boost/shared_ptr.hpp> #include <boost/make_shared.hpp> //--------------------- /*! \file Efficient_RANSAC.h */ namespace CGAL { namespace Shape_detection_3 { /*! \ingroup PkgPointSetShapeDetection3 \brief A shape detection algorithm using a RANSAC method. Given a point set in 3D space with unoriented normals, sampled on surfaces, this class enables to detect subsets of connected points lying on the surface of primitive shapes. Each input point is assigned to either none or at most one detected primitive shape. The implementation follows \cgalCite{schnabel2007efficient}. 
\tparam Traits a model of `EfficientRANSACTraits` */ template <class Traits> class Efficient_RANSAC { public: /// \cond SKIP_IN_MANUAL struct Filter_unassigned_points { Filter_unassigned_points() : m_shape_index(dummy) {} Filter_unassigned_points(const std::vector<int> &shapeIndex) : m_shape_index(shapeIndex) {} bool operator()(std::size_t x) { if (x < m_shape_index.size()) return m_shape_index[x] == -1; else return true; // to prevent infinite incrementing } const std::vector<int>& m_shape_index; std::vector<int> dummy; }; typedef boost::filter_iterator<Filter_unassigned_points, boost::counting_iterator<std::size_t> > Point_index_iterator; ///< iterator for indices of points. /// \endcond /// \name Types /// @{ /// \cond SKIP_IN_MANUAL typedef typename Traits::Input_range::iterator Input_iterator; typedef typename Traits::FT FT; ///< number type. typedef typename Traits::Point_3 Point; ///< point type. typedef typename Traits::Vector_3 Vector; ///< vector type. /// \endcond typedef typename Traits::Input_range Input_range; ///< Model of the concept `Range` with random access iterators, providing input points and normals /// through the following two property maps. typedef typename Traits::Point_map Point_map; ///< property map to access the location of an input point. typedef typename Traits::Normal_map Normal_map; ///< property map to access the unoriented normal of an input point typedef Shape_base<Traits> Shape; ///< shape type. 
#ifdef DOXYGEN_RUNNING typedef unspecified_type Shape_range; #else struct Shape_range : public Iterator_range< typename std::vector<boost::shared_ptr<Shape> >::const_iterator> { typedef Iterator_range< typename std::vector<boost::shared_ptr<Shape> >::const_iterator> Base; Shape_range(boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > extracted_shapes) : Base(make_range(extracted_shapes->begin(), extracted_shapes->end())), m_extracted_shapes(extracted_shapes) {} private: boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > m_extracted_shapes; // keeps a reference to the shape vector }; #endif ///< An `Iterator_range` with a bidirectional constant iterator type with value type `boost::shared_ptr<Shape>`. #ifdef DOXYGEN_RUNNING typedef unspecified_type Point_index_range; ///< An `Iterator_range` with a bidirectional iterator with value type `std::size_t` /// as indices into the input data that has not been assigned to a shape. /// As this range class has no `size()` method, the method /// `Efficient_RANSAC::number_of_unassigned_points()` is provided. #else typedef Iterator_range<Point_index_iterator> Point_index_range; #endif /// @} /// \name Parameters /// @{ /*! %Parameters for the shape detection algorithm. They are explained in detail in Section \ref Point_set_shape_detection_3Parameters of the User Manual. */ struct Parameters { Parameters() : probability((FT) 0.01) , min_points((std::numeric_limits<std::size_t>::max)()) , epsilon(-1) , normal_threshold((FT) 0.9) , cluster_epsilon(-1) {} FT probability; ///< Probability to control search endurance. %Default value: 5%. std::size_t min_points; ///< Minimum number of points of a shape. %Default value: 1% of total number of input points. FT epsilon; ///< Maximum tolerance Euclidian distance from a point and a shape. %Default value: 1% of bounding box diagonal. FT normal_threshold; ///< Maximum tolerance normal deviation from a point's normal to the normal on shape at projected point. 
%Default value: 0.9 (around 25 degrees). FT cluster_epsilon; ///< Maximum distance between points to be considered connected. %Default value: 1% of bounding box diagonal. }; /// @} private: typedef internal::Octree<internal::DirectPointAccessor<Traits> > Direct_octree; typedef internal::Octree<internal::IndexedPointAccessor<Traits> > Indexed_octree; //--------------------------------------------typedef // Creates a function pointer for instancing shape instances. template <class ShapeT> static Shape *factory() { return new ShapeT; } public: /// \name Initialization /// @{ /*! Constructs an empty shape detection engine. */ Efficient_RANSAC(Traits t = Traits()) : m_traits(t) , m_direct_octrees(NULL) , m_global_octree(NULL) , m_num_subsets(0) , m_num_available_points(0) , m_num_total_points(0) , m_valid_iterators(false) {} /*! Releases all memory allocated by this instances including shapes. */ ~Efficient_RANSAC() { clear(); } /*! Retrieves the traits class. */ const Traits& traits() const { return m_traits; } /*! Retrieves the point property map. */ const Point_map& point_map() const { return m_point_pmap; } /*! Retrieves the normal property map. */ const Normal_map& normal() const { return m_normal_pmap; } Input_iterator input_iterator_first() const { return m_input_iterator_first; } Input_iterator input_iterator_beyond() const { return m_input_iterator_beyond; } /*! Sets the input data. The range must stay valid until the detection has been performed and the access to the results is no longer required. The data in the input is reordered by the methods `detect()` and `preprocess()`. This function first calls `clear()`. */ void set_input( Input_range& input_range, ///< range of input data. Point_map point_map = Point_map(), ///< property map to access the position of an input point. Normal_map normal_map = Normal_map() ///< property map to access the normal of an input point. 
) { m_point_pmap = point_map; m_normal_pmap = normal_map; m_input_iterator_first = input_range.begin(); m_input_iterator_beyond = input_range.end(); clear(); m_extracted_shapes = boost::make_shared<std::vector<boost::shared_ptr<Shape> > >(); m_num_available_points = m_num_total_points = std::distance( m_input_iterator_first, m_input_iterator_beyond); m_valid_iterators = true; } /*! Registers in the detection engine the shape type `ShapeType` that must inherit from `Shape_base`. For example, for registering a plane as detectable shape you should call `ransac.add_shape_factory< Shape_detection_3::Plane<Traits> >();`. Note that if your call is within a template, you should add the `template` keyword just before `add_shape_factory`: `ransac.template add_shape_factory< Shape_detection_3::Plane<Traits> >();`. */ template <class Shape_type> void add_shape_factory() { m_shape_factories.push_back(factory<Shape_type>); } /*! Constructs internal data structures required for the shape detection. These structures only depend on the input data, i.e. the points and normal vectors. This method is called by `detect()`, if it was not called before by the user. 
*/ bool preprocess() { if (m_num_total_points == 0) return false; // Generation of subsets m_num_subsets = (std::size_t)(std::max<std::ptrdiff_t>)((std::ptrdiff_t) std::floor(std::log(double(m_num_total_points))/std::log(2.))-9, 2); // SUBSET GENERATION -> // approach with increasing subset sizes -> replace with octree later on Input_iterator last = m_input_iterator_beyond - 1; std::size_t remainingPoints = m_num_total_points; m_available_octree_sizes.resize(m_num_subsets); m_direct_octrees = new Direct_octree *[m_num_subsets]; for (int s = int(m_num_subsets) - 1;s >= 0;--s) { std::size_t subsetSize = remainingPoints; std::vector<std::size_t> indices(subsetSize); if (s) { subsetSize >>= 1; for (std::size_t i = 0;i<subsetSize;i++) { std::size_t index = get_default_random()(2); index = index + (i<<1); index = (index >= remainingPoints) ? remainingPoints - 1 : index; indices[i] = index; } // move points to the end of the point vector std::size_t j = subsetSize; do { j--; typename std::iterator_traits<Input_iterator>::value_type tmp = (*last); *last = m_input_iterator_first[indices[std::size_t(j)]]; m_input_iterator_first[indices[std::size_t(j)]] = tmp; last--; } while (j > 0); m_direct_octrees[s] = new Direct_octree( m_traits, last + 1, last + subsetSize + 1, m_point_pmap, m_normal_pmap, remainingPoints - subsetSize); } else m_direct_octrees[0] = new Direct_octree( m_traits, m_input_iterator_first, m_input_iterator_first + (subsetSize), m_point_pmap, m_normal_pmap, 0); m_available_octree_sizes[s] = subsetSize; m_direct_octrees[s]->createTree(); remainingPoints -= subsetSize; } m_global_octree = new Indexed_octree( m_traits, m_input_iterator_first, m_input_iterator_beyond, m_point_pmap, m_normal_pmap); m_global_octree->createTree(); return true; } /// @} /// \name Memory Management /// @{ /*! Removes all shape types registered for detection. */ void clear_shape_factories() { m_shape_factories.clear(); } /*! 
Frees memory allocated for the internal search structures
    but keeps the detected shapes.
    It invalidates the range retrieved using `unassigned_points()`.
   */
  void clear_octrees() {
    // If there is no data yet, there are no data structures.
    if (!m_valid_iterators)
      return;

    if (m_global_octree) {
      delete m_global_octree;
      m_global_octree = NULL;
    }

    if (m_direct_octrees) {
      for (std::size_t i = 0;i<m_num_subsets;i++)
        delete m_direct_octrees[i];

      delete [] m_direct_octrees;
      m_direct_octrees = NULL;
    }

    m_num_subsets = 0;
  }

  /*!
    Calls `clear_octrees()` and removes all detected shapes.
    All internal structures are cleaned, including formerly detected shapes.
    Thus iterators and ranges retrieved through `shapes()` and
    `indices_of_unassigned_points()` are invalidated.
  */
  void clear() {
    // If there is no data yet, there are no data structures.
    if (!m_valid_iterators)
      return;

    // swap with an empty vector to actually release the memory
    std::vector<int>().swap(m_shape_index);

    m_extracted_shapes =
      boost::make_shared<std::vector<boost::shared_ptr<Shape> > >();

    m_num_available_points = m_num_total_points;

    clear_octrees();
  }

  /// @}

  /// \name Detection
  /// @{
  /*!
    Performs the shape detection. Shape types considered during the detection
    are those registered using `add_shape_factory()`.

    \return `true` if shape types have been registered and
            input data has been set. Otherwise, `false` is returned.
  */
  bool detect(
    const Parameters &options = Parameters()
    ///< %Parameters for shape detection.
              ) {
    // No shape types for detection or no points provided, exit
    if (m_shape_factories.size() == 0 ||
        (m_input_iterator_beyond - m_input_iterator_first) == 0)
      return false;

    // Lazily build the octrees on the first call.
    if (m_num_subsets == 0 || m_global_octree == 0) {
      if (!preprocess())
        return false;
    }

    // Reset data structures possibly used by former search
    m_extracted_shapes =
      boost::make_shared<std::vector<boost::shared_ptr<Shape> > >();
    m_num_available_points = m_num_total_points;

    for (std::size_t i = 0;i<m_num_subsets;i++) {
      m_available_octree_sizes[i] = m_direct_octrees[i]->size();
    }

    // Use bounding box diagonal as reference for default values
    Bbox_3 bbox = m_global_octree->boundingBox();
    FT bbox_diagonal = (FT) CGAL::sqrt(
      (bbox.xmax() - bbox.xmin()) * (bbox.xmax() - bbox.xmin())
      + (bbox.ymax() - bbox.ymin()) * (bbox.ymax() - bbox.ymin())
      + (bbox.zmax() - bbox.zmin()) * (bbox.zmax() - bbox.zmin()));

    m_options = options;

    // Epsilon or cluster_epsilon have been set by the user?
    // If not, derive from bounding box diagonal (1% of the diagonal).
    m_options.epsilon = (m_options.epsilon < 0)
      ? bbox_diagonal * (FT) 0.01 : m_options.epsilon;

    m_options.cluster_epsilon = (m_options.cluster_epsilon < 0)
      ? bbox_diagonal * (FT) 0.01 : m_options.cluster_epsilon;

    // Minimum number of points has been set?  Default: 1% of the points,
    // but never fewer than 10.
    m_options.min_points =
      (m_options.min_points >= m_num_available_points) ?
        (std::size_t)((FT)0.01 * m_num_available_points) :
        m_options.min_points;
    m_options.min_points = (m_options.min_points < 10) ?
      10 : m_options.min_points;

    // Initializing the shape index (-1 means "not assigned to any shape")
    m_shape_index.assign(m_num_available_points, -1);

    // List of all randomly drawn candidates
    // with the minimum number of points
    std::vector<Shape *> candidates;

    // Identifying minimum number of samples over all registered shape types
    std::size_t required_samples = 0;
    for (std::size_t i = 0;i<m_shape_factories.size();i++) {
      Shape *tmp = (Shape *) m_shape_factories[i]();
      required_samples = (std::max<std::size_t>)(required_samples,
                                                 tmp->minimum_sample_size());
      delete tmp;
    }

    std::size_t first_sample; // first sample for RANSAC

    FT best_expected = 0;

    // number of points that have been assigned to a shape
    std::size_t num_invalid = 0;

    std::size_t generated_candidates = 0;
    std::size_t failed_candidates = 0;
    bool force_exit = false;
    bool keep_searching = true;

    do { // main loop
      best_expected = 0;

      if (keep_searching)
        do { // Generate candidates
          //1. pick a point p1 randomly among available points
          std::set<std::size_t> indices;
          bool done = false;
          do {
            // rejection sampling: redraw until an unassigned point is hit
            do
              first_sample = get_default_random()(m_num_available_points);
            while (m_shape_index[first_sample] != -1);

            done = m_global_octree->drawSamplesFromCellContainingPoint(
              get(m_point_pmap, *(m_input_iterator_first + first_sample)),
              select_random_octree_level(),
              indices,
              m_shape_index,
              required_samples);

          } while (m_shape_index[first_sample] != -1 || !done);

          generated_candidates++;

          //add candidate for each type of primitives
          for(typename std::vector<Shape *(*)()>::iterator it =
            m_shape_factories.begin(); it != m_shape_factories.end(); it++) {
            Shape *p = (Shape *) (*it)();
            //compute the primitive and says if the candidate is valid
            p->compute(indices,
                       m_input_iterator_first,
                       m_traits,
                       m_point_pmap,
                       m_normal_pmap,
                       m_options.epsilon,
                       m_options.normal_threshold);

            if (p->is_valid()) {
              // cheap first estimate: score against a single subset,
              // at most 500 points
              improve_bound(p, m_num_available_points - num_invalid, 1, 500);

              //evaluate the candidate
              if(p->max_bound() >= m_options.min_points && p->score() > 0) {
                if (best_expected < p->expected_value())
                  best_expected = p->expected_value();
                candidates.push_back(p);
              }
              else {
                failed_candidates++;
                delete p;
              }
            }
            else {
              failed_candidates++;
              delete p;
            }
          }

          // bail out after too many consecutive rejected candidates
          if (failed_candidates >= 10000)
            force_exit = true;

          keep_searching = (stop_probability(m_options.min_points,
            m_num_available_points - num_invalid,
            generated_candidates,
            m_global_octree->maxLevel())
              > m_options.probability);
        } while( !force_exit
          && stop_probability((std::size_t) best_expected,
                              m_num_available_points - num_invalid,
                              generated_candidates,
                              m_global_octree->maxLevel())
            > m_options.probability
          && keep_searching);
        // end of generate candidate

      if (force_exit) {
        break;
      }

      if (candidates.empty())
        continue;

      // Now get the best candidate in the current set of all candidates
      // Note that the function sorts the candidates:
      //  the best candidate is always the last element of the vector

      Shape *best_candidate =
        get_best_candidate(candidates, m_num_available_points - num_invalid);

      // If search is done and the best candidate is too small, we are done.
      // NOTE(review): best_candidate->m_score is read before the NULL check
      // below; if get_best_candidate() could return NULL this dereference
      // would fault — confirm against get_best_candidate()'s contract.
      if (!keep_searching && best_candidate->m_score < m_options.min_points)
        break;

      if (!best_candidate)
        continue;

      best_candidate->m_indices.clear();

      // re-score the winner against the full point set, with a relaxed
      // (3 * epsilon) band
      best_candidate->m_score =
        m_global_octree->score(best_candidate,
                               m_shape_index,
                               FT(3) * m_options.epsilon,
                               m_options.normal_threshold);

      best_expected = static_cast<FT>(best_candidate->m_score);

      best_candidate->connected_component(best_candidate->m_indices,
                                          m_options.cluster_epsilon);

      // check score against min_points and clear out candidates if too low
      if (best_candidate->indices_of_assigned_points().size() <
        m_options.min_points) {
        // drop every candidate representing the same primitive
        for (std::size_t i = 0;i < candidates.size() - 1;i++) {
          if (best_candidate->is_same(candidates[i])) {
            delete candidates[i];
            candidates[i] = NULL;
          }
        }

        candidates.back() = NULL;
        delete best_candidate;
        best_candidate = NULL;

        // Trimming candidates list: compact the vector in place by moving
        // surviving (non-NULL) entries over the deleted slots.
        std::size_t empty = 0, occupied = 0;
        while (empty < candidates.size()) {
          while (empty < candidates.size() && candidates[empty]) empty++;

          if (empty >= candidates.size())
            break;

          if (occupied < empty)
            occupied = empty + 1;

          while (occupied < candidates.size() && !candidates[occupied])
            occupied++;

          if (occupied >= candidates.size())
            break;

          candidates[empty] = candidates[occupied];
          candidates[occupied] = NULL;
          empty++;
          occupied++;
        }

        candidates.resize(empty);
      }
      else if (stop_probability((std::size_t) best_candidate->expected_value(),
                                (m_num_available_points - num_invalid),
                                generated_candidates,
                                m_global_octree->maxLevel())
          <= m_options.probability) {
        // Remove candidate from list (ownership moves to m_extracted_shapes)
        candidates.back() = NULL;

        //1. add best candidate to final result.
        m_extracted_shapes->push_back(
          boost::shared_ptr<Shape>(best_candidate));

        //2. remove the points
        const std::vector<std::size_t> &indices_points_best_candidate =
          best_candidate->indices_of_assigned_points();

        for (std::size_t i = 0;i<indices_points_best_candidate.size();i++) {
          m_shape_index[indices_points_best_candidate.at(i)] =
            int(m_extracted_shapes->size()) - 1;

          num_invalid++;

          // decrement the size of every direct octree whose slice contains
          // this point
          for (std::size_t j = 0;j<m_num_subsets;j++) {
            if (m_direct_octrees[j] && m_direct_octrees[j]->m_root) {
              std::size_t offset = m_direct_octrees[j]->offset();
              if (offset <= indices_points_best_candidate.at(i) &&
                  (indices_points_best_candidate.at(i) - offset)
                  < m_direct_octrees[j]->size()) {
                m_available_octree_sizes[j]--;
              }
            }
          }
        }

        //2.3 Remove the points from the subtrees
        generated_candidates--;
        failed_candidates = 0;
        best_expected = 0;

        // prefix sums of remaining subset sizes, used to rescale bounds
        std::vector<std::size_t> subset_sizes(m_num_subsets);
        subset_sizes[0] = m_available_octree_sizes[0];
        for (std::size_t i = 1;i<m_num_subsets;i++) {
          subset_sizes[i] = subset_sizes[i-1] + m_available_octree_sizes[i];
        }

        //3. Remove points from candidates common with extracted primitive
        //#pragma omp parallel for
        best_expected = 0;
        for (std::size_t i=0;i< candidates.size()-1;i++) {
          if (candidates[i]) {
            candidates[i]->update_points(m_shape_index);
            candidates[i]->compute_bound(
              subset_sizes[candidates[i]->m_nb_subset_used - 1],
              m_num_available_points - num_invalid);

            if (candidates[i]->max_bound() < m_options.min_points) {
              delete candidates[i];
              candidates[i] = NULL;
            }
            else {
              best_expected = (candidates[i]->expected_value() > best_expected)
                ? candidates[i]->expected_value() : best_expected;
            }
          }
        }

        // two-pointer compaction of the candidate vector
        std::size_t start = 0, end = candidates.size() - 1;
        while (start < end) {
          while (candidates[start] && start < end) start++;
          while (!candidates[end] && start < end) end--;

          if (!candidates[start] && candidates[end] && start < end) {
            candidates[start] = candidates[end];
            candidates[end] = NULL;
            start++;
            end--;
          }
        }

        if (candidates[end]) end++;

        candidates.resize(end);
      }

      keep_searching = (stop_probability(m_options.min_points,
                                         m_num_available_points - num_invalid,
                                         generated_candidates,
                                         m_global_octree->maxLevel())
          > m_options.probability);
    } while((keep_searching
      && FT(m_num_available_points - num_invalid) >= m_options.min_points)
      || best_expected >= m_options.min_points);

    // Clean up remaining candidates.
    for (std::size_t i = 0;i<candidates.size();i++)
      delete candidates[i];
    candidates.resize(0);

    m_num_available_points -= num_invalid;

    return true;
  }

  /// @}

  /// \name Access
  /// @{
  /*!
    Returns an `Iterator_range` with a bidirectional iterator with value type
    `boost::shared_ptr<Shape>` over the detected shapes in the order of
    detection. Depending on the chosen probability for the detection, the
    shapes are ordered with decreasing size.
  */
  Shape_range shapes() const {
    return Shape_range(m_extracted_shapes);
  }

  /*!
    Number of points not assigned to a shape.
  */
  std::size_t number_of_unassigned_points() {
    return m_num_available_points;
  }

  /*!
Returns an `Iterator_range` with a bidirectional iterator with
    value type `std::size_t` as indices into the input data that has
    not been assigned to a shape.
  */
  Point_index_range indices_of_unassigned_points() {
    // Filter the index sequence [0, size) down to the entries whose
    // m_shape_index slot is still -1 (unassigned).
    Filter_unassigned_points fup(m_shape_index);

    Point_index_iterator p1 =
      boost::make_filter_iterator<Filter_unassigned_points>(
      fup,
      boost::counting_iterator<std::size_t>(0),
      boost::counting_iterator<std::size_t>(m_shape_index.size()));

    return make_range(p1, Point_index_iterator(p1.end()));
  }
  /// @}

private:
  // Uniform random octree level in [0, maxLevel], used to vary the spatial
  // extent from which RANSAC samples are drawn.
  int select_random_octree_level() {
    return (int) get_default_random()(m_global_octree->maxLevel() + 1);
  }

  // Sorts the candidates by their bound and refines the bounds of the
  // contenders until the best candidate is separated from the rest.
  // The best candidate is always the last element of the vector on return.
  Shape* get_best_candidate(std::vector<Shape* >& candidates,
                            const std::size_t num_available_points) {
    if (candidates.size() == 1)
      return candidates.back();

    int index_worse_candidate = 0;
    bool improved = true;

    while (index_worse_candidate < (int)candidates.size() - 1 && improved) {
      improved = false;
      typename Shape::Compare_by_max_bound comp;

      std::sort(candidates.begin() + index_worse_candidate,
                candidates.end(),
                comp);

      //refine the best one
      improve_bound(candidates.back(),
                    num_available_points, m_num_subsets,
                    m_options.min_points);

      int position_stop;

      //Take all those intersecting the best one, check for equal ones
      for (position_stop = int(candidates.size()) - 1;
           position_stop > index_worse_candidate;
           position_stop--) {
        if (candidates.back()->min_bound() >
            candidates.at(position_stop)->max_bound())
          break;//the intervals do not overlaps anymore

        if (candidates.at(position_stop)->max_bound()
            <= m_options.min_points)
          break;  //the following candidate doesnt have enough points!

        //if we reach this point, there is an overlap
        //  between best one and position_stop
        //so request refining bound on position_stop
        improved |= improve_bound(candidates.at(position_stop),
                                  num_available_points,
                                  m_num_subsets,
                                  m_options.min_points);

        //test again after refined
        if (candidates.back()->min_bound() >
            candidates.at(position_stop)->max_bound())
          break;//the intervals do not overlaps anymore
      }

      index_worse_candidate = position_stop;
    }

    return candidates.back();
  }

  // Scores the candidate against one more subset octree and recomputes its
  // lower/upper bounds. Returns false if no further subset is available.
  bool improve_bound(Shape *candidate,
                     std::size_t num_available_points,
                     std::size_t max_subset,
                     std::size_t min_points) {
    if (candidate->m_nb_subset_used >= max_subset)
      return false;

    if (candidate->m_nb_subset_used >= m_num_subsets)
      return false;

    candidate->m_nb_subset_used =
      (candidate->m_nb_subset_used >= m_num_subsets) ?
        m_num_subsets - 1 : candidate->m_nb_subset_used;

    //what it does is add another subset and recompute lower and upper bound
    //the next subset to include is provided by m_nb_subset_used

    // number of points in the subsets already considered
    std::size_t num_points_evaluated = 0;
    for (std::size_t i=0;i<candidate->m_nb_subset_used;i++)
      num_points_evaluated += m_available_octree_sizes[i];

    // need score of new subset as well as sum of
    // the score of the previous considered subset
    std::size_t new_score = 0;
    std::size_t new_sampled_points = 0;

    do {
      new_score = m_direct_octrees[candidate->m_nb_subset_used]->score(
        candidate,
        m_shape_index,
        m_options.epsilon,
        m_options.normal_threshold);

      candidate->m_score += new_score;

      num_points_evaluated +=
        m_available_octree_sizes[candidate->m_nb_subset_used];
      new_sampled_points +=
        m_available_octree_sizes[candidate->m_nb_subset_used];

      candidate->m_nb_subset_used++;
    } while (new_sampled_points < min_points &&
             candidate->m_nb_subset_used < m_num_subsets);

    // NOTE(review): the accumulated m_score above is overwritten here with
    // the size of m_indices — presumably score() fills m_indices as a side
    // effect; confirm against the octree score() implementation.
    candidate->m_score = candidate->m_indices.size();

    candidate->compute_bound(num_points_evaluated, num_available_points);

    return true;
  }

  // Probability that a candidate of size largest_candidate has been missed
  // after num_candidates draws (the RANSAC stopping criterion).
  inline FT stop_probability(std::size_t largest_candidate,
                             std::size_t num_pts,
                             std::size_t num_candidates,
                             std::size_t octree_depth) const {
    return (std::min<FT>)(std::pow((FT) 1.f
      - (FT) largest_candidate / FT(num_pts * octree_depth * 4),
      (int) num_candidates), (FT) 1);
  }

private:
  Parameters m_options;

  // Traits class.
  Traits m_traits;

  // Octrees build on input data for quick shape evaluation and
  // sample selection within an octree cell.
  Direct_octree **m_direct_octrees;
  Indexed_octree *m_global_octree;
  std::vector<std::size_t> m_available_octree_sizes;
  std::size_t m_num_subsets;

  // maps index into points to assigned extracted primitive
  std::vector<int> m_shape_index;
  std::size_t m_num_available_points;
  std::size_t m_num_total_points;

  //give the index of the subset of point i
  std::vector<int> m_index_subsets;

  // shapes extracted by the last call to detect()
  boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > >
    m_extracted_shapes;

  // factory callbacks registered for detection
  std::vector<Shape *(*)()> m_shape_factories;

  // iterators of input data
  bool m_valid_iterators;
  Input_iterator m_input_iterator_first, m_input_iterator_beyond;
  Point_map m_point_pmap;
  Normal_map m_normal_pmap;
};
}
}

#endif // CGAL_SHAPE_DETECTION_3_EFFICIENT_RANSAC_H
GB_binop__plus_int32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__plus_int32)
// A.*B function (eWiseMult):       GB (_AemultB_08__plus_int32)
// A.*B function (eWiseMult):       GB (_AemultB_02__plus_int32)
// A.*B function (eWiseMult):       GB (_AemultB_04__plus_int32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__plus_int32)
// A*D function (colscale):         GB (_AxD__plus_int32)
// D*A function (rowscale):         GB (_DxB__plus_int32)
// C+=B function (dense accum):     GB (_Cdense_accumB__plus_int32)
// C+=b function (dense accum):     GB (_Cdense_accumb__plus_int32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__plus_int32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__plus_int32)
// C=scalar+B                       GB (_bind1st__plus_int32)
// C=scalar+B'                      GB (_bind1st_tran__plus_int32)
// C=A+scalar                       GB (_bind2nd__plus_int32)
// C=A'+scalar                      GB (_bind2nd_tran__plus_int32)

// C type:   int32_t
// A type:   int32_t
// A pattern? 0
// B type:   int32_t
// B pattern? 0

// BinaryOp: cij = (aij + bij)

// The macros below parameterize the generic templates included further down
// for this specific operator/type combination (PLUS, int32).

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x + y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PLUS || GxB_NO_INT32 || GxB_NO_PLUS_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; work is split over nthreads.
void GB (_Cdense_ewise3_accum__plus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__plus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__plus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__plus_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // note: unreachable (the block above returns); kept as generated
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__plus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__plus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__plus_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read by the template when is_eWiseUnion is true
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__plus_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__plus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__plus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__plus_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__plus_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x + bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__plus_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij + y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x + aij) ;                       \
}

GrB_Info GB (_bind1st_tran__plus_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template expansion
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij + y) ;                       \
}

GrB_Info GB (_bind2nd_tran__plus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__log_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__log_fc32_fc32)
// op(A') function:  GB (_unop_tran__log_fc32_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = clogf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (complex natural log, single precision)
#define GB_OP(z, x) \
    z = clogf (x) ;

// casting (identity cast: same type on both sides)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ;            \
    Cx [pC] = clogf (z) ;           \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOG || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__log_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
            // identity op with no typecast: a plain memcpy suffices
            GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (p = 0 ; p < anz ; p++)
            {
                GxB_FC32_t aij = Ax [p] ;
                GxB_FC32_t z = aij ;
                Cx [p] = clogf (z) ;
            }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = clogf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__log_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ops.h
#pragma once #ifndef OPS_H_ #define OPS_H_ #include <op_boilerplate.h> #include <array/DataTypeUtils.h> #include <helpers/shape.h> #include <vector> #include <Environment.h> #include <loops/summarystatsreduce.h> #define MIN 1e-12 #define MAX_FLOAT 1e37 #define MIN_FLOAT 1e-37 #define MAX_INT 2147483647 #define MIN_CUTFOFF -3.79297773665f #define FLOAT_MIN_NORMAL 1.17549435e-38 #define EPS 1e-5 #define AFFINITY close #define DOUBLE_PI_T T(2.0 * 3.14159265358979323846) #define no_op_exec_special static const bool requiresSpecial = false; static void execSpecial(T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_accumulation static const bool requiresSpecialAccumulation = false; static void execSpecial(T *x, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){} #ifdef __CUDACC__ #include <helpers/sharedmem.h> #define no_op_exec_special_cuda static __device__ void execSpecialCuda(T *dx, Nd4jLong *xShapeBuffer,T *result, Nd4jLong *resultShapeBuffer,T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_accumulation_cuda static inline __device__ void execSpecialCuda(T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {} #else // hacky fix for isnan/being being out of scope //#ifdef IOS //#define isinf(x) 0 // this isn't right. 
But std::isinf fails //#define isnan(x) 0 //#else //#define isnan std::isnan //#define isinf std::isinf //#endif #define no_op_exec_special_cuda #define no_op_exec_special_accumulation_cuda #endif #define SELU_ALPHA 1.6732632423543772848170429916717 #define SELU_LAMBDA 1.0507009873554804934193349852946 #ifdef _OPENMP #pragma omp declare reduction(maxT : float,double,float16 : \ omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\ initializer (omp_priv=-MAX_FLOAT) #pragma omp declare reduction(minT : float,double,float16 : \ omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\ initializer (omp_priv=MAX_FLOAT) #pragma omp declare reduction(sumT : float,double,float16 : \ omp_out = omp_in + omp_out)\ initializer (omp_priv=0.0f) #endif namespace functions { namespace indexreduce { template<typename T> struct IndexValue { T value; Nd4jLong index; }; } namespace summarystats { template <typename T> class SummaryStatsData; } } namespace simdOps { template<typename T> class Add { public: op_def static T op(T d1, T d2) { return d1 + d2; } op_def static T op(T d1, T d2, T *params) { return d1 + d2; } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return d1 + params[0]; } op_def static T startingValue() { return static_cast<T>(0.f); } }; template<typename T> class Subtract { public: op_def static T op(T d1, T d2) { return d1 - d2; } op_def static T op(T d1, T d2, T *params) { return d1 - d2; } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return d1 - params[0]; } }; template<typename T> class SquaredSubtract { public: op_def static T op(T d1, T d2) { return nd4j::math::nd4j_pow<T>(d1 - d2, static_cast<T>(2.f)); } op_def static T op(T d1, T d2, T *params) { return nd4j::math::nd4j_pow<T>(d1 - d2, static_cast<T>(2.f)); } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return nd4j::math::nd4j_pow<T>(d1 - params[0], static_cast<T>(2.f)); } 
}; template<typename T> class ReverseSubtract { public: op_def static T op(T d1, T d2) { return d2 - d1; } op_def static T op(T d1, T d2, T *params) { return d2 - d1; } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return params[0] - d1; } }; template<typename T> class LogPoisonLossFull { public: op_def static T op(T z, T c) { return (nd4j::math::nd4j_exp<T>(c) - z * c + (z * nd4j::math::nd4j_log<T>(z) - z + static_cast<T>(0.5f) * nd4j::math::nd4j_log<T>(DOUBLE_PI_T * z))); } op_def static T op(T z, T c, T *params) { return (nd4j::math::nd4j_exp<T>(c) - z * c + (z * nd4j::math::nd4j_log<T>(z) - z + static_cast<T>(0.5f) * nd4j::math::nd4j_log<T>(DOUBLE_PI_T * z))); } op_def static T op(T z) { return (z * nd4j::math::nd4j_log<T>(z) - z + static_cast<T>(0.5f) * nd4j::math::nd4j_log<T>(DOUBLE_PI_T * z)); } // op for MetaOps op_def static T op(T z, T *params) { return (nd4j::math::nd4j_exp<T>(params[0]) - z * params[0] + (z * nd4j::math::nd4j_log<T>(z) - z + static_cast<T>(0.5f) * nd4j::math::nd4j_log<T>(DOUBLE_PI_T * z))); } }; template<typename T> class LogPoisonLoss { public: op_def static T op(T z, T c) { return (nd4j::math::nd4j_exp<T>(c) - z * c); } op_def static T op(T z, T c, T *params) { return (nd4j::math::nd4j_exp<T>(c) - z * c); } op_def static T op(T z) { return (z); } // op for MetaOps op_def static T op(T z, T *params) { return (nd4j::math::nd4j_exp<T>(params[0]) - z * params[0]); } }; template<typename T> class Multiply { public: op_def static T op(T d1, T d2) { return d1 * d2; } op_def static T op(T d1, T d2, T *params) { return d1 * d2; } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return d1 * params[0]; } op_def static T startingValue() { return static_cast<T>(1.f); } }; template<typename T> class Divide { public: op_def static T op(T d1, T d2) { return d1 / d2; } op_def static T op(T d1, T d2, T *params) { return d1 / d2; } op_def static T op(T d1) { 
return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return d1 / params[0]; } op_def static T startingValue() { return static_cast<T>(1.f); } }; template<typename T> class SafeDivide { public: op_def static T op(T d1, T d2) { if(d2 == static_cast<T>(0.f)) return static_cast<T>(0.f); return d1 / d2; } op_def static T op(T d1, T d2, T *params) { if(d2 == static_cast<T>(0.f)) return static_cast<T>(0.f); return d1 / d2; } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { if(params[0] == static_cast<T>(0.f)) return static_cast<T>(0.f); return d1 / params[0]; } }; template<typename T> class FloorDiv { public: op_def static T op(T d1, T d2) { return nd4j::math::nd4j_floor<T>(d1 / d2); } op_def static T op(T d1, T d2, T *params) { return nd4j::math::nd4j_floor<T>(d1 / d2); } op_def static T op(T d1) { return nd4j::math::nd4j_floor<T>(d1); } // op for MetaOps op_def static T op(T d1, T *params) { return nd4j::math::nd4j_floor<T>(d1 / params[0]); } }; template<typename T> class TruncateDiv { public: op_def static T op(T d1, T d2) { auto i1 = static_cast<int>(d1); auto i2 = static_cast<int>(d2); return static_cast<T>(i1 / i2); } op_def static T op(T d1, T d2, T *params) { auto i1 = static_cast<int>(d1); auto i2 = static_cast<int>(d2); return static_cast<T>(i1 / i2); } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { auto i1 = static_cast<int>(d1); auto i2 = static_cast<int>(params[0]); return static_cast<T>(i1 / i2); } }; template<typename T> class Remainder { public: op_def static T op(T d1, T d2) { return nd4j::math::nd4j_remainder(d1, d2); } op_def static T op(T d1, T d2, T *params) { return nd4j::math::nd4j_remainder(d1, d2); } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return nd4j::math::nd4j_remainder(d1, params[0]); } }; template<typename T> class FMod { public: op_def static T op(T d1, T d2) { return 
nd4j::math::nd4j_fmod(d1, d2); } op_def static T op(T d1, T d2, T *params) { return nd4j::math::nd4j_fmod(d1, d2); } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return nd4j::math::nd4j_fmod(d1, params[0]); } }; template<typename T> class FloorMod { public: op_def static T op(T d1, T d2) { T m = nd4j::math::nd4j_fmod(d1, d2);; return (d1 < static_cast<T>(0.0f)) == (d2 < static_cast<T>(0.0f)) ? m : nd4j::math::nd4j_fmod<T>(m + d2, d2); } op_def static T op(T d1, T d2, T *params) { T m = nd4j::math::nd4j_fmod(d1, d2); return (d1 < static_cast<T>(0.0f)) == (d2 < static_cast<T>(0.0f)) ? m : nd4j::math::nd4j_fmod<T>(m + d2, d2); } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { T m = nd4j::math::nd4j_fmod(d1, params[0]); return (d1 < static_cast<T>(0.0f)) == (params[0] < static_cast<T>(0.0f)) ? m : nd4j::math::nd4j_fmod<T>(m + params[0], params[0]); } }; template<typename T> class ReverseDivide { public: op_def static T op(T d1, T d2) { return d2 / d1; } op_def static T op(T d1, T d2, T *params) { return d2 / d1; } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return params[0] / d1; } }; template<typename T> class Copy { public: op_def static T op(T d1, T d2) { return d2; } op_def static T op(T d1, T d2, T *params) { return d2; } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return params[0]; } }; template<typename T> class Copy2 { public: op_def static T op(T d1, T d2) { return d2; } op_def static T op(T d1, T d2, T *params) { return d2; } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return params[0]; } }; template<typename T> class Axpy { public: op_def static T op(T d1, T d2) { return d2 + d1; } op_def static T op(T d1, T d2, T *params) { T alpha = params[0]; return alpha * d1 + d2; } op_def static T op(T d1) { return d1; 
} }; template<typename T> class And { public: op_def static T op(T d1, T d2) { return d2 + d1; } op_def static T op(T d1, T d2, T *params) { T comp = params[0]; return d1 != comp && d2 != comp ? static_cast<T>(1.0f) : static_cast<T>(0.0f); } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return static_cast<T>(119.0f); } }; template<typename T> class Or { public: op_def static T op(T d1, T d2) { return d2 + d1; } op_def static T op(T d1, T d2, T *params) { T comp = params[0]; return d1 != comp || d2 != comp ? static_cast<T>(1.0f) : static_cast<T>(0.0f); } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return static_cast<T>(119.0f); } }; template<typename T> class Xor { public: op_def static T op(T d1, T d2) { return d2 + d1; } op_def static T op(T d1, T d2, T *params) { T comp = params[0]; return ((d1 == comp && d2 != comp)||(d1 != comp && d2 == comp)) ? static_cast<T>(1.0f) : static_cast<T>(0.0f); } op_def static T op(T d1) { return d1; } }; template<typename T> class Not { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { T comp = params[0]; return d1 == comp ? 
static_cast<T>(1.0f) : static_cast<T>(0.0f); } }; template<typename T> class SetValOrLess { public: op_def static T op(T d1, T d2, T *params) { if (d2 < d1) { return d1; } return d2; } }; template<typename T> class Mod { public: /* // just a optional note, feel free to remove later op_def static half op(half d1, half d2, half *params) { return __float2half(simdOps::Mod<float>::op(__half2float(d1), __half2float(d2), nullptr)); } */ op_def static T op(T d1, T d2) { return (int)d1 % (int)d2; } op_def static T op(T d1, T d2, T *params) { return (int)d1 % (int)d2; } // op for MetaOp op_def static T op(T d1, T *params) { return (int)d1 % (int)params[0]; } }; template<typename T> class ReverseMod { public: op_def static T op(T d1, T d2) { return (int)d2 % (int)d1; } op_def static T op(T d1, T d2, T *params) { return (int)d2 % (int)d1; } // op for MetaOp op_def static T op(T d1, T *params) { return (int)params[0] % (int)d1; } }; /** * Whether 2 elements in an array * are epsilion equal */ template<typename T> class Epsilon { public: op_def static T op(T d1, T d2, T *params) { T diff = d1 - d2; T absDiff = nd4j::math::nd4j_abs<T>(diff); if (absDiff <= static_cast<T>(MIN)) return static_cast<T>(1.0f); return static_cast<T>(0.0f); } op_def static T op(T d1, T *params) { return d1; } }; template<typename T> class EqualTo { public: op_def static T op(T d1, T d2) { return d1 == d2; } op_def static T op(T d1, T d2, T *params) { return d1 == d2; } op_def static T op(T d1, T *params) { return d1; } }; template<typename T> class NotEqualTo { public: op_def static T op(T d1, T d2) { return d1 != d2; } op_def static T op(T d1, T d2, T *params) { return d1 != d2; } op_def static T op(T d1, T *params) { return d1; } }; template<typename T> class GreaterThanOrEqual { public: op_def static T op(T d1, T d2) { return d1 >= d2; } op_def static T op(T d1, T d2, T *params) { return d1 >= d2; } // FIXME: this signature clashes with MetaOp stuff op_def static T op(T d1, T *params) { return d1; } 
}; template<typename T> class GreaterThan { public: op_def static T op(T d1, T d2) { return d1 > d2; } op_def static T op(T d1, T d2, T *params) { return d1 > d2; } // FIXME: this signature clashes with MetaOp stuff op_def static T op(T d1, T *params) { return d1; } }; template<typename T> class LessThan { public: op_def static T op(T d1, T d2) { return d1 < d2; } op_def static T op(T d1, T d2, T *params) { return d1 < d2; } op_def static T op(T d1, T *params) { return d1; } }; template<typename T> class LessThanOrEqual { public: op_def static T op(T d1, T d2) { return d1 <= d2; } op_def static T op(T d1, T d2, T *params) { return d1 <= d2; } op_def static T op(T d1, T *params) { return d1; } }; template<typename T> class Abs { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_abs<T>(d1); } }; template<typename T> class Ceiling { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_ceil<T>(d1); } }; template<typename T> class Cosine { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_cos<T>(d1); } }; template<typename T> class Exp { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_exp<T>(d1); } }; template<typename T> class HardTanhDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return ((d1 >= static_cast<T>(-1.0f) && d1 <= static_cast<T>(1.0f)) ? 
static_cast<T>(1.0f) : static_cast<T>(0.0f)); } }; template<typename T> class HardTanh { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { if (d1 < static_cast<T>(-1.0f)) return static_cast<T>(-1.0f); else if (d1 > static_cast<T>(1.0f)) return static_cast<T>(1.0f); else return d1; } }; template<typename T> class Floor { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_floor<T>(d1); } }; template<typename T> class Log { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_log<T>(d1); } }; template<typename T> class Log1p { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_log<T>(1+d1); } }; template<typename T> class LogX { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_log<T>(d1) / nd4j::math::nd4j_log<T>(params[0]) ; } }; template<typename T> class StabilizeFP16 { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { if (d1 <= static_cast<T>(0.f)) return static_cast<T>(0.001f); else return d1; } }; template<typename T> class SpecialDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return d1 * (static_cast<T>(1.0f) - d1); } }; template<typename T> class Neg { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return -d1; } }; template<typename T> class Erf { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_erf<T>(d1); } }; template<typename T> class Erfc { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_erfc<T>(d1); } }; template<typename T> class Reciprocal { public: no_op_exec_special no_op_exec_special_cuda // op_def static T 
op(T d1) { // return (T(1.0f) / d1); // } // op for MetaOps op_def static T op(T d1, T *params) { return (static_cast<T>(1.0f)/d1); } }; template<typename T> class Sqr { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_pow<T>(d1, static_cast<T>(2.f)); } op_def static T op(T d1) { return nd4j::math::nd4j_pow<T>(d1, static_cast<T>(2.0f)); } }; template<typename T> class RelativeError { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_re<T>(d1, params[0]); } op_def static T op(T d1, T d2) { return nd4j::math::nd4j_re<T>(d1, d2); } op_def static T op(T d1, T d2, T *params) { return nd4j::math::nd4j_re<T>(d1, d2); } op_def static T op(T d1) { return static_cast<T>(0.0f); } }; template<typename T> class BinaryRelativeError { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { T d2 = params[0]; T threshold = params[1]; return nd4j::math::nd4j_re<T>(d1, d2) > threshold ? static_cast<T>(1.0f) : static_cast<T>(0.0f); } op_def static T op(T d1, T d2, T *params) { T threshold = params[0]; return nd4j::math::nd4j_re<T>(d1, d2) > threshold ? static_cast<T>(1.0f) : static_cast<T>(0.0f); } op_def static T op(T d1) { return static_cast<T>(0.0f); } }; template<typename T> class BinaryMinimumAbsoluteRelativeError { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { T d2 = params[0]; T thresholdRelative = params[1]; T thresholdAbsolute = params[2]; return nd4j::math::nd4j_re<T>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<T>(d1 - d2) < thresholdAbsolute ? static_cast<T>(0.0f) : static_cast<T>(1.0f)) : static_cast<T>(0.0f); } op_def static T op(T d1, T d2, T *params) { T thresholdRelative = params[0]; T thresholdAbsolute = params[1]; return nd4j::math::nd4j_re<T>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<T>(d1 - d2) < thresholdAbsolute ? 
static_cast<T>(0.0f) : static_cast<T>(1.0f)) : static_cast<T>(0.0f); } op_def static T op(T d1) { return static_cast<T>(0.0f); } }; template<typename T> class Pow { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_pow<T>(d1, params[0]); } op_def static T op(T d1, T d2) { return nd4j::math::nd4j_pow<T>(d1, d2); } op_def static T op(T d1, T d2, T *params) { return nd4j::math::nd4j_pow<T>(d1, d2); } op_def static T op(T d1) { return d1; } }; template<typename T> class PowDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return params[0] * nd4j::math::nd4j_pow<T>(d1, params[0] - static_cast<T>(1.f)); } op_def static T op(T d1, T d2) { return d2 * nd4j::math::nd4j_pow<T>(d1, d2 - static_cast<T>(1.f)); } op_def static T op(T d1, T d2, T *params) { return d2 * nd4j::math::nd4j_pow<T>(d1, d2 - static_cast<T>(1.f)); } op_def static T op(T d1) { return d1; } }; template<typename T> class Round { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_round<T>(d1); } }; template<typename T> class IsNan { public: no_op_exec_special no_op_exec_special_cuda no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_isnan(d1) ? 
static_cast<T>(1.0f) : static_cast<T>(0.0f); } op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); } op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; } }; template<typename T> class Expm1 { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_exp(d1) - static_cast<T>(1.0f); } }; template<typename T> class IsInf { public: no_op_exec_special no_op_exec_special_cuda no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_isinf<T>(d1) ? static_cast<T>(1.0f) : static_cast<T>(0.0f); } op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); } op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; } }; template<typename T> class IsInfOrNan{ public: no_op_exec_special no_op_exec_special_cuda no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_isfin<T>(d1) ? 
static_cast<T>(0.0f) : static_cast<T>(1.0f); } op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); } op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; } }; template<typename T> class IsFinite { public: no_op_exec_special no_op_exec_special_cuda no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_isfin<T>(d1) ? static_cast<T>(1.0f) : static_cast<T>(0.0f); } op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); } op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; } }; template<typename T> class ClipByValue { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { if (d1 > params[1]) return params[1]; else if (d1 < params[0]) return params[0]; else return d1; } }; template<typename T> class Swish { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return d1 * nd4j::math::nd4j_sigmoid<T>(d1); } }; template<typename T> class SwishDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { T ex = nd4j::math::nd4j_pow<T>(static_cast<T>(M_E), d1); return (ex * (d1 + ex + static_cast<T>(1.f))) / nd4j::math::nd4j_pow<T>((ex + static_cast<T>(1.f)) , static_cast<T>(2.0f)); } }; template<typename T> class LogSigmoid { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_log(nd4j::math::nd4j_sigmoid<T>(d1)); } }; template<typename T> class LogSigmoidDerivative { public: 
no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { T ex = nd4j::math::nd4j_pow<T>(M_E, d1); return static_cast<T>(1.f) / (ex + static_cast<T>(1.f)); } }; template<typename T> class Sigmoid { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_sigmoid<T>(d1); } }; template<typename T> class SigmoidDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_sigmoidderivative<T>(d1); } }; template<typename T> class HardSigmoid { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_min<T>(static_cast<T>(1.0f), nd4j::math::nd4j_max<T>(static_cast<T>(0.0f), (static_cast<T>(0.2f)) * d1 + static_cast<T>(0.5f))); } }; template<typename T> class HardSigmoidDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return d1 < static_cast<T>(-2.5f) || d1 > static_cast<T>(2.5f) ? 
static_cast<T>(0.0f) : static_cast<T>(0.2f); } }; /** * Scale to be between a min and max */ template<typename T> class SetRange { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { T min = params[0]; T max = params[1]; if (d1 >= min && d1 <= max) return d1; if (min == static_cast<T>(0.0f) && max == static_cast<T>(1.0f)) { auto val = static_cast<T>(1.0f) / (static_cast<T>(1.0f) + nd4j::math::nd4j_exp<T>(-d1)); return (nd4j::math::nd4j_floor<T>(val * (max - min)) + min); } auto ret = (nd4j::math::nd4j_floor<T>(d1 * (max - min)) + min); return ret; } }; template<typename T> class Sin { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_sin<T>(d1); } }; template<typename T> class Square { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return d1 * d1; } }; template<typename T> class Sqrt { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_sqrt<T>(d1); } }; template<typename T> class RSqrt { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return static_cast<T>(1.0f) / nd4j::math::nd4j_sqrt<T>(d1); } }; template<typename T> class Rint { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_rint<T>(d1); } }; template<typename T> class SoftPlus { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::softplus<T>(d1); } }; template<typename T> class Sign { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return (d1 > static_cast<T>(0.0f)) - (d1 < static_cast<T>(0.0f)); } }; template<typename T> class TimesOneMinus { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return d1 * (static_cast<T>(1.0f) - d1); } }; template<typename T> class 
RationalTanh { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { // keep 2/3 as runtime variable, to match precision auto dis = (static_cast<T>(2.0f) / static_cast<T>(3.0f)) * d1; auto tanh = nd4j::math::nd4j_sgn<T>(dis) * (static_cast<T>(1.0f) - (static_cast<T>(1.0f) / (static_cast<T>(1.0f) + nd4j::math::nd4j_abs<T>(dis) + nd4j::math::nd4j_pow<T>(dis, static_cast<T>(2.0f)) + static_cast<T>(1.41645f) * nd4j::math::nd4j_pow<T>(dis, static_cast<T>(4.0f)) ))); return static_cast<T>(1.7159f) * tanh; } }; template<typename T> class RationalTanhDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { auto dis = (static_cast<T>(2.0f) / static_cast<T>(3.0f)) * d1; auto a = static_cast<T>(1.0f) + nd4j::math::nd4j_abs<T>(dis) + nd4j::math::nd4j_pow<T>(dis, static_cast<T>(2.)) + static_cast<T>(1.41645f) * nd4j::math::nd4j_pow<T>(dis, static_cast<T>(4.f)); auto tDeriv = (static_cast<T>(1.0f) + nd4j::math::nd4j_sign<T>(dis) * (static_cast<T>(2.0f) * dis + static_cast<T>(4.0f) * static_cast<T>(1.41645f) * nd4j::math::nd4j_pow<T>(dis, static_cast<T>(3.f)))) / (a * a); return static_cast<T>(1.7159f) * (static_cast<T>(2.0f) / static_cast<T>(3.0f)) * tDeriv; } }; template<typename T> class Tanh { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_tanh<T>(d1); } }; template<typename T> class RectifiedTanh { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_max<T>(static_cast<T>(0.0f), nd4j::math::nd4j_tanh<T>(d1)); } }; template<typename T> class RectifiedTanhDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return d1 > static_cast<T>(0.0f) ? 
// NOTE(review): tail of a transform-op class whose opening lines fall before this chunk; left unmodified.
nd4j::math::nd4j_tanhderivative<T>(d1) : static_cast<T>(0.0f); } };

// Element-wise inverse hyperbolic tangent.
template<typename T> class ATanh {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return nd4j::math::nd4j_atanh<T>(d1); }
};

// Element-wise derivative of tanh (delegates to the math backend helper).
template<typename T> class TanhDerivative {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return nd4j::math::nd4j_tanhderivative<T>(d1); }
};

// Element-wise cube: d1^3.
template<typename T> class Cube {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return d1 * d1 * d1; }
};

// Derivative of Cube: 3 * d1^2.
template<typename T> class CubeDerivative {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return 3 * d1 * d1; }
};

// Element-wise arc cosine.
template<typename T> class ACos {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return nd4j::math::nd4j_acos<T>(d1); }
};

// Element-wise inverse hyperbolic sine.
template<typename T> class ASinh {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return nd4j::math::nd4j_asinh<T>(d1); }
};

// Derivative of asinh: 1 / sqrt(d1^2 + 1).
template<typename T> class ASinhDerivative {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) {
        return static_cast<T>(1.f) / (nd4j::math::nd4j_sqrt(nd4j::math::nd4j_pow(d1, static_cast<T>(2.f)) + static_cast<T>(1.f)));
    }
};

// Element-wise inverse hyperbolic cosine.
template<typename T> class ACosh {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return nd4j::math::nd4j_acosh<T>(d1); }
};

// Derivative of acosh: 1 / (sqrt(d1 - 1) * sqrt(d1 + 1)).
template<typename T> class ACoshDerivative {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) {
        return static_cast<T>(1.f) / (nd4j::math::nd4j_sqrt(d1 - static_cast<T>(1.f)) * nd4j::math::nd4j_sqrt(d1 + static_cast<T>(1.f)));
    }
};

// Constant 1 for every input (fill op).
template<typename T> class Ones {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return static_cast<T>(1.0f); }
};

// Element-wise softsign: d1 / (1 + |d1|) (delegated to backend helper).
template<typename T> class SoftSign {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return nd4j::math::nd4j_softsign<T>(d1); }
};

// Derivative of softsign (delegated to backend helper).
template<typename T> class SoftSignDerivative {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return nd4j::math::nd4j_softsignderivative<T>(d1); }
};

// Counting reduction: op() yields 1 when the element satisfies the condition
// encoded in extraParams (mode in extraParams[2], compare value in [0], eps in [1]),
// 0 otherwise; merge/update sum the matches.
template<typename T> class MatchCondition {
public:
    no_op_exec_special
    no_op_exec_special_cuda
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return old + opOutput; }

    op_def static T update(T old, T opOutput, T *extraParams) { return old + opOutput; }

    // this op return 1.0 if condition met, 0.0 otherwise
    op_def static T op(T d1, T *extraParams) {
        T compare = extraParams[0];
        T eps = extraParams[1];
        auto mode = static_cast<int>(extraParams[2]);
        //nd4j_printf("value: %f; comp: %f; eps: %f; mode: %i;\n", d1, compare, eps, mode);
        if (mode == 0) // equals
            return nd4j::math::nd4j_abs<T>(d1 - compare) <= eps ? 1.0 : 0.0;
        else if (mode == 1) // not equals
            return nd4j::math::nd4j_abs<T>(d1 - compare) > eps ? 1.0 : 0.0;
        else if (mode == 2) // less_than
            return d1 < compare ? 1.0 : 0.0;
        else if (mode == 3) // greater_than
            return d1 > compare ? 1.0 : 0.0;
        else if (mode == 4) // less_or_equals_than
            return d1 <= compare ? 1.0 : 0.0;
        else if (mode == 5) // greater_or_equals_than
            return d1 >= compare ? 1.0 : 0.0;
        else if (mode == 6) // abs_less_than
            return nd4j::math::nd4j_abs<T>(d1) < compare ? 1.0 : 0.0;
        else if (mode == 7) // abs_greater_than
            return nd4j::math::nd4j_abs<T>(d1) > compare ? 1.0 : 0.0;
        else if (mode == 8) // is inf
            return nd4j::math::nd4j_isinf(d1) ? 1.0 : 0.0;
        else if (mode == 9) // is nan
            return nd4j::math::nd4j_isnan(d1) ? 1.0 : 0.0;
        else if (mode == 10)
            return (d1 == compare) ? 1.0 : 0.0;
        else if (mode == 11)
            return (d1 != compare) ? 1.0 : 0.0;
        else if (mode == 12) // abs_greater_or_equals_than
            return nd4j::math::nd4j_abs<T>(d1) >= compare ? 1.0 : 0.0;
        else if (mode == 13) // abs_less_or_equals_than
            return nd4j::math::nd4j_abs<T>(d1) <= compare ? 1.0 : 0.0;
        else
            printf("Undefined match condition: [%i]\n", mode);
        // fall-through for undefined modes returns the raw value unchanged
        return d1;
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; }
};

// Element-wise exponential linear unit.
template<typename T> class ELU {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return nd4j::math::nd4j_elu<T>(d1); }
};

// Derivative of ELU.
template<typename T> class ELUDerivative {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return nd4j::math::nd4j_eluderivative<T>(d1); }
};

// Rectified linear unit with configurable cutoff in params[0] (0 for plain ReLU).
template<typename T> class RELU {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return d1 < params[0] ? params[0] : d1; }
};

// ReLU clipped from above at 6 (ReLU6); lower cutoff taken from params[0].
template<typename T> class RELU6 {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) {
        T relu = d1 < params[0] ? params[0] : d1;
        return relu < static_cast<T>(6.f) ? relu : static_cast<T>(6.f);
    }
};

// Leaky ReLU; negative slope taken from params[0].
template<typename T> class LeakyRELU {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return nd4j::math::nd4j_leakyrelu<T>(d1, params[0]); }
};

// Scaled exponential linear unit (SELU), using the SELU_LAMBDA / SELU_ALPHA constants.
template<typename T> class SELU {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) {
        return d1 > static_cast<T>(0.0f) ? static_cast<T>(SELU_LAMBDA) * d1 : static_cast<T>(SELU_LAMBDA) * (static_cast<T>(SELU_ALPHA) * nd4j::math::nd4j_exp<T>(d1) - static_cast<T>(SELU_ALPHA));
    }
};

// Derivative of SELU: lambda for positives, lambda*alpha*exp(d1) otherwise.
template<typename T> class SELUDerivative {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) {
        return d1 > static_cast<T>(0.0f) ? static_cast<T>(SELU_LAMBDA) : static_cast<T>(SELU_ALPHA) * static_cast<T>(SELU_LAMBDA) * nd4j::math::nd4j_exp<T>(d1);
    }
};

// Derivative of leaky ReLU: 1 for d1 >= 0, otherwise the slope in params[0].
template<typename T> class LeakyRELUDerivative {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) {
        if (d1 >= static_cast<T>(0.0f))
            return static_cast<T>(1.0f);
        else
            return params[0];
    }
};

// Element-wise arc sine.
template<typename T> class ASin {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return nd4j::math::nd4j_asin<T>(d1); }
};

// Element-wise hyperbolic sine.
template<typename T> class Sinh {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return nd4j::math::nd4j_sinh<T>(d1); }
};

// Derivative of sinh is cosh.
template<typename T> class SinhDerivative {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return nd4j::math::nd4j_cosh<T>(d1); }
};

// Element-wise hyperbolic cosine.
template<typename T> class Cosh {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return nd4j::math::nd4j_cosh<T>(d1); }
};

// Element-wise tangent.
template<typename T> class Tan {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return nd4j::math::nd4j_tan<T>(d1); }
};

// Derivative of tan: 1 / cos^2(d1).
template<typename T> class TanDerivative {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) {
        return static_cast<T>(1.0f) / nd4j::math::nd4j_pow<T>(nd4j::math::nd4j_cos<T>(d1), static_cast<T>(2.0f));
    }
};

// Element-wise arc tangent.
template<typename T> class ATan {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return nd4j::math::nd4j_atan(d1); }
};

// Pairwise atan2. NOTE(review): arguments are passed swapped (d2, d1) relative to the
// conventional atan2(y, x) call — presumably intentional for this framework's operand
// ordering; verify against callers before changing.
template<typename T> class Atan2 {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T d2, T *params) { return nd4j::math::nd4j_atan2<T>(d2, d1); }
};

// Identity transform: passes the input through untouched.
template<typename T> class Identity {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return d1; }
};

// Numeric stabilization: clamps d1 so that d1 * k stays within +/- MIN_CUTFOFF
// (sic — macro name as defined upstream, presumably "cutoff"); k comes from params[0].
template<typename T> class Stabilize {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) {
        T k = params[0];
        if (d1 * k > static_cast<T>(- MIN_CUTFOFF))
            return static_cast<T>(- MIN_CUTFOFF) / k;
        else if (d1 * k < static_cast<T>(MIN_CUTFOFF))
            return static_cast<T>(MIN_CUTFOFF) / k;
        return d1;
    }
};

// Heaviside-style step with threshold in params[0]: 1 above it, 0 otherwise.
template<typename T> class Step {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return (d1 > params[0] ? static_cast<T>(1.0f) : static_cast<T>(0.0f)); }
};

// Element-wise 1 - d1.
template<typename T> class OneMinus {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) { return static_cast<T>(1.0f) - d1; }
};

// Reduction: plain sum, identity element 0.
template<typename T> class Sum {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T op(T d1, T *extraParams) { return d1; }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; }
};

// Reduction: Shannon entropy over squared values; sums d1^2 * log(d1^2) and negates at the end.
template<typename T> class ShannonEntropy {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T op(T d1, T *extraParams) {
        return nd4j::math::nd4j_pow<T>(d1, static_cast<T>(2.0f)) * nd4j::math::nd4j_log<T>(nd4j::math::nd4j_pow<T>(d1, static_cast<T>(2.0f)));
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return -reduction; }
};

// Reduction: sum of log(d1^2).
template<typename T> class LogEntropy {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T op(T d1, T *extraParams) {
        return nd4j::math::nd4j_log<T>(nd4j::math::nd4j_pow<T>(d1, static_cast<T>(2.0f)));
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; }
};

// Reduction: sum of d1 * log(d1) (entropy accumulator; no final negation here).
template<typename T> class Entropy {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T op(T d1, T *extraParams) { return d1 * nd4j::math::nd4j_log<T>(d1); }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; }
};

// Reduction: sum of absolute values (L1-style accumulation).
template<typename T> class ASum {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return nd4j::math::nd4j_abs<T>(opOutput) + nd4j::math::nd4j_abs<T>(old); }

    op_def static T update(T old, T opOutput, T *extraParams) { return nd4j::math::nd4j_abs<T>(opOutput) + nd4j::math::nd4j_abs<T>(old); }

    op_def static T op(T d1, T *extraParams) { return nd4j::math::nd4j_abs<T>(d1); }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return nd4j::math::nd4j_abs<T>(reduction); }
};

// Reduction: counts elements that are not exactly zero.
template<typename T> class CountNonZero {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T op(T d1, T *extraParams) { return d1 == static_cast<T>(0.0f) ? static_cast<T>(0.0f) : static_cast<T>(1.0f); }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; }
};

// Reduction: counts elements that are exactly zero.
template<typename T> class CountZero {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T op(T d1, T *extraParams) { return d1 == static_cast<T>(0.0f) ? static_cast<T>(1.0f) : static_cast<T>(0.0f); }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; }
};

// Reduction: product, identity element 1.
template<typename T> class Prod {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(1.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput * old; }

    op_def static T update(T old, T opOutput, T *extraParams) { return opOutput * old; }

    op_def static T op(T d1, T *extraParams) { return d1; }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; }
};

// Reduction: logical "any" — sums values, then collapses to 1 if the sum is positive.
template<typename T> class Any {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T op(T d1, T *extraParams) { return d1; }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return reduction > static_cast<T>(0.0f) ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
    }
};

// Reduction: logical "all" — multiplies values, then collapses to 1 if the product is positive.
template<typename T> class All {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(1.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput * old; }

    op_def static T update(T old, T opOutput, T *extraParams) { return opOutput * old; }

    op_def static T op(T d1, T *extraParams) { return d1; }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return reduction > static_cast<T>(0.0f) ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
    }
};

// Reduction: arithmetic mean (sum divided by element count).
template<typename T> class Mean {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T op(T d1, T *extraParams) { return d1; }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction / (int) n; }
};

// Reduction: mean of absolute values.
template<typename T> class AMean {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return nd4j::math::nd4j_abs<T>(opOutput) + nd4j::math::nd4j_abs<T>(old); }

    op_def static T update(T old, T opOutput, T *extraParams) { return nd4j::math::nd4j_abs<T>(opOutput) + nd4j::math::nd4j_abs<T>(old); }

    op_def static T op(T d1, T *extraParams) { return nd4j::math::nd4j_abs<T>(d1); }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return nd4j::math::nd4j_abs<T>(reduction) / static_cast<T>(n); }
};

// Reduction: maximum element; also usable pairwise via the two-argument op overloads.
template<typename T> class Max {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return input[0]; }

    op_def static T merge(T old, T opOutput, T *extraParams) { return nd4j::math::nd4j_max<T>(old, opOutput); }

    op_def static T update(T old, T opOutput, T *extraParams) { return nd4j::math::nd4j_max<T>(opOutput, old); }

    op_def static T op(T d1, T d2, T *params) { return nd4j::math::nd4j_max<T>(d1, d2); }

    op_def static T op(T d1, T d2) { return nd4j::math::nd4j_max<T>(d1, d2); }

    // FIXME: this signature overlaps with MetaOp
    op_def static T op(T d1, T *extraParams) { return d1; }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; }
};

// Reduction: element with the maximum absolute value (returned as |value| by op()).
template<typename T> class AMax {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return input[0]; }

    op_def static T merge(T old, T opOutput, T *extraParams) { return nd4j::math::nd4j_max<T>(nd4j::math::nd4j_abs<T>(old), nd4j::math::nd4j_abs<T>(opOutput)); }

    op_def static T update(T old, T opOutput, T *extraParams) { return nd4j::math::nd4j_max<T>(nd4j::math::nd4j_abs<T>(opOutput), nd4j::math::nd4j_abs<T>(old)); }

    op_def static T op(T d1, T d2, T *params) { return nd4j::math::nd4j_max<T>(nd4j::math::nd4j_abs<T>(d1), nd4j::math::nd4j_abs<T>(d2)); }

    // two-argument form keeps the sign of the winning operand
    op_def static T op(T d1, T d2) { return nd4j::math::nd4j_abs<T>(d1) > nd4j::math::nd4j_abs<T>(d2) ? d1 : d2; }

    // FIXME: this signature overlaps with MetaOp
    op_def static T op(T d1, T *extraParams) { return nd4j::math::nd4j_abs<T>(d1); }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return nd4j::math::nd4j_abs<T>(reduction); }
};

// Reduction: element with the minimum absolute value.
template<typename T> class AMin {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return input[0]; }

    op_def static T merge(T old, T opOutput, T *extraParams) { return nd4j::math::nd4j_min<T>(nd4j::math::nd4j_abs<T>(old), nd4j::math::nd4j_abs<T>(opOutput)); }

    op_def static T update(T old, T opOutput, T *extraParams) { return nd4j::math::nd4j_min<T>(nd4j::math::nd4j_abs<T>(opOutput), nd4j::math::nd4j_abs<T>(old)); }

    op_def static T op(T d1, T d2, T *params) { return nd4j::math::nd4j_min(nd4j::math::nd4j_abs<T>(d1), nd4j::math::nd4j_abs<T>(d2)); }

    // two-argument form keeps the sign of the winning operand
    op_def static T op(T d1, T d2) { return nd4j::math::nd4j_abs<T>(d1) < nd4j::math::nd4j_abs<T>(d2) ? d1 : d2; }

    // FIXME: this signature overlaps with MetaOp
    op_def static T op(T d1, T *extraParams) { return nd4j::math::nd4j_abs<T>(d1); }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return nd4j::math::nd4j_abs<T>(reduction); }
};

// Reduction: minimum element; also usable pairwise via the two-argument op overloads.
template<typename T> class Min {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return input[0]; }

    op_def static T merge(T old, T opOutput, T *extraParams) { return nd4j::math::nd4j_min<T>(old, opOutput); }

    op_def static T update(T old, T opOutput, T *extraParams) { return nd4j::math::nd4j_min<T>(opOutput, old); }

    op_def static T op(T d1, T d2, T *params) { return nd4j::math::nd4j_min(d1, d2); }

    op_def static T op(T d1, T d2) { return nd4j::math::nd4j_min(d1, d2); }

    // FIXME: this signature overlaps with MetaOp
    op_def static T op(T d1, T *extraParams) { return d1; }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; }
};
// Reduction: L1 norm (sum of absolute values).
template<typename T> class Norm1 {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T op(T d1, T *extraParams) { return nd4j::math::nd4j_abs<T>(d1); }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; }
};

// Reduction: L2 norm (sqrt of the sum of squares).
template<typename T> class Norm2 {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return nd4j::math::nd4j_sqrt<T>(reduction); }

    op_def static T op(T d1, T *extraParams) { return d1 * d1; }
};

// Reduction: sum of squares (squared L2 norm, no final sqrt).
template<typename T> class SquaredNorm {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T op(T d1, T *extraParams) { return d1 * d1; }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; }
};

// Reduction: Frobenius norm — accumulates |d1|^2 and takes sqrt at the end.
template<typename T> class NormFrobenius {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T op(T d1, T *extraParams) {
        T v = nd4j::math::nd4j_abs(d1);
        return v * v;
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return nd4j::math::nd4j_sqrt<T>(reduction); }
};

// Reduction: p-norm — sum of |d1|^p with p taken from extraParams[0], then the 1/p root.
template<typename T> class NormP {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T op(T d1, T *extraParams) { return nd4j::math::nd4j_pow(nd4j::math::nd4j_abs(d1), extraParams[0]); }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return nd4j::math::nd4j_pow(reduction, static_cast<T>(1.0f) / extraParams[0]); }
};

// Reduction: max norm (largest absolute value).
// NOTE(review): merge() sums while update() takes a max — inconsistent combiners;
// and postProcess passes the same argument to nd4j_max twice (equivalent to
// |reduction| alone). Verify against upstream intent before changing.
template<typename T> class NormMax {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; }

    op_def static T update(T old, T opOutput, T *extraParams) { return nd4j::math::nd4j_max<T>(nd4j::math::nd4j_abs<T>(old), nd4j::math::nd4j_abs<T>(opOutput)); }

    op_def static T op(T d1, T *extraParams) { return d1; }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return nd4j::math::nd4j_max<T>(nd4j::math::nd4j_abs<T>(reduction), nd4j::math::nd4j_abs<T>(reduction));
    }
};

// Reduction: sample variance; the mean is supplied in extraParams[0] and the
// final division uses (n - 1) (Bessel's correction).
template<typename T> class Variance {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return old + opOutput; }

    op_def static T update(T old, T opOutput, T *extraParams) { return old + opOutput; }

    op_def static T op(T d1, T *extraParams) {
        T mean = extraParams[0];
        T ret = d1 - mean;
        return ret * ret;
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        // T bias = extraParams[1];
        // return (reduction - (nd4j::math::nd4j_pow<T>(bias, static_cast<T>(2.0f)) / static_cast<T>(n))) / (n - 1)
        return reduction / static_cast<T>(n - 1);
    }
};

/**
 * Standard deviation of a buffer
 */
// Accumulates squared deviations like Variance, then takes the square root of
// Variance<T>::postProcess in its own postProcess step.
template<typename T> class StandardDeviation {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) { return old + opOutput; }

    op_def static T update(T old, T opOutput, T *extraParams) { return old + opOutput; }

    op_def static T op(T d1, T *extraParams) {
        T mean = extraParams[0];
        T ret = d1 - mean;
        return ret * ret;
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        T ret = Variance<T>::postProcess(reduction, n, extraParams);
        T sqrtRet = nd4j::math::nd4j_sqrt<T>(ret);
        return sqrtRet;
    }
};

// Reduce3 op: cosine similarity — accumulates the dot product, carrying the two
// squared norms in extraParams[0]/[1] and normalizing in postProcess.
template<typename T> class CosineSimilarity {
public:
    static const int extraParamsLen = 2;

    op_def static T *generateExtraParams() {
        //T *extraParams = new T[2];
        return nullptr;
    }

    op_def static void finalizeExtraParams(T *extraParams) {
        //delete[] extraParams;
    }

    op_def static T startingValue(T *input) { return static_cast<T>(0.0f); }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return reduction / (nd4j::math::nd4j_sqrt<T>(extraParams[0]) * nd4j::math::nd4j_sqrt<T>(extraParams[1]));
    }

    op_def static T op(T d1, T d2, T *extraParams) {
        extraParams[0] += d1 * d1;
        extraParams[1] += d2 * d2;
        return (d1 * d2);
    }

    op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {
        extraParamsTotal[0] += extraParamsLocal[0];
        extraParamsTotal[1] += extraParamsLocal[1];
    }

#ifdef __CUDACC__
    static _CUDA_D inline T opAtomic(T d1, T d2, T *extraParams) {
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0], static_cast<T>(d1 * d1));
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], static_cast<T>(d2 * d2));
        return (d1 * d2);
    }
#endif

    op_def static T update(T old, T opOutput, T *extraParams) { return old + opOutput; }

    op_def static T merge(T old, T opOutput, T *extraParams) { return update(old, opOutput, extraParams); }
};

// Reduce3 op: Jaccard distance — 1 - sum(min)/sum(max), with the numerator and
// denominator accumulated in extraParams[0]/[1].
template<typename T> class JaccardDistance {
public:
    static const int extraParamsLen = 2;

    op_def static T *generateExtraParams() {
        //T *extraParams = new T[2];
        return nullptr;
    }

    op_def static void finalizeExtraParams(T *extraParams) {
        //delete[] extraParams;
    }

    op_def static T startingValue(T *input) { return static_cast<T>(0.0f); }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        // num / denom
        return (static_cast<T>(1.0f)) - (extraParams[0] / extraParams[1]);
    }

    op_def static T num(T d1, T d2) { return nd4j::math::nd4j_min<T>(d1, d2); }

    op_def static T denom(T d1, T d2) { return nd4j::math::nd4j_max<T>(d1, d2); }

    op_def static T op(T d1, T d2, T *extraParams) {
        extraParams[0] += num(d1, d2);
        extraParams[1] += denom(d1, d2);
        return static_cast<T>(0.0f);
    }

    op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {
        extraParamsTotal[0] += extraParamsLocal[0];
        extraParamsTotal[1] += extraParamsLocal[1];
    }

#ifdef __CUDACC__
    __device__ static inline T opAtomic(T d1, T d2, T *extraParams) {
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0], num(d1, d2));
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], denom(d1, d2));
        return static_cast<T>(0.0f);
    }
#endif

    op_def static T update(T old, T opOutput, T *extraParams) { return old + opOutput; }

    op_def static T merge(T old, T opOutput, T *extraParams) { return update(old, opOutput, extraParams); }
};

// Reduce3 op: Hamming distance — counts mismatching positions, normalized by n.
template<typename T> class SimpleHammingDistance {
public:
    static const int extraParamsLen = 0;

    op_def static T *generateExtraParams() {
        //T *extraParams = new T[2];
        return nullptr;
    }

    op_def static void finalizeExtraParams(T *extraParams) {
        //delete[] extraParams;
    }

    op_def static T startingValue(T *input) { return static_cast<T>(0.0f); }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return static_cast<T>(reduction / n); }

    op_def static T op(T d1, T d2, T *extraParams) { return (d1 == d2) ? static_cast<T>(0.0f) : static_cast<T>(1.0f); }

    op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {
    }

#ifdef __CUDACC__
    __device__ static inline T opAtomic(T d1, T d2, T *extraParams) {
        return op(d1, d2, extraParams);
    }
#endif

    op_def static T update(T old, T opOutput, T *extraParams) { return old + opOutput; }

    op_def static T merge(T old, T opOutput, T *extraParams) { return update(old, opOutput, extraParams); }
};

// Reduce3 op: cosine distance = 1 - cosine similarity; squared norms accumulated
// from |d1|*|d1| and |d2|*|d2| in extraParams[0]/[1].
template<typename T> class CosineDistance {
public:
    static const int extraParamsLen = 2;

    op_def static T *generateExtraParams() {
        //T *extraParams = new T[2];
        return nullptr;
    }

    op_def static void finalizeExtraParams(T *extraParams) {
        //delete[] extraParams;
    }

    op_def static T startingValue(T *input) { return static_cast<T>(0.0f); }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return (static_cast<T>(1.0f)) - (reduction / (nd4j::math::nd4j_sqrt<T>(extraParams[0]) * nd4j::math::nd4j_sqrt<T>(extraParams[1])));
    }

    op_def static T op(T d1, T d2, T *extraParams) {
        extraParams[0] += nd4j::math::nd4j_abs<T>(d1) * nd4j::math::nd4j_abs<T>(d1);
        extraParams[1] += nd4j::math::nd4j_abs<T>(d2) * nd4j::math::nd4j_abs<T>(d2);
        return (d1 * d2);
    }

    op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {
        extraParamsTotal[0] += extraParamsLocal[0];
        extraParamsTotal[1] += extraParamsLocal[1];
    }

#ifdef __CUDACC__
    static _CUDA_D inline T opAtomic(T d1, T d2, T *extraParams) {
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0], nd4j::math::nd4j_abs<T>(d1) * nd4j::math::nd4j_abs<T>(d1));
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], nd4j::math::nd4j_abs<T>(d2) * nd4j::math::nd4j_abs<T>(d2));
        return (d1 * d2);
    }
#endif

    op_def static T update(T old, T opOutput, T *extraParams) { return old + opOutput; }

    op_def static T merge(T old, T opOutput, T *extraParams) { return update(old, opOutput, extraParams); }
};

/**
 * Dot product between 2 arrays
 */
template<typename T> class Dot {
public:
    static const int extraParamsLen = 0;

    op_def static T * generateExtraParams() {
        return nullptr;
    }

    op_def static void finalizeExtraParams(T *extraParamsRef) {
        //no-op
        //delete[] * extraParamsRef;
    }

    op_def static T startingValue(T *input) { return static_cast<T>(0.0f); }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParamsRef) { return reduction; }

    op_def static T op(T d1, T d2, T *extraParamsRef) { return d1 * d2; }

#ifdef __CUDACC__
    __device__ static inline T opAtomic(T d1, T d2, T *extraParamsRef) {
        return op(d1, d2, extraParamsRef);
    }
#endif

    op_def static T update(T old, T opOutput, T *extraParamsRef) { return opOutput + old; }

    op_def static T merge(T old, T opOutput, T *extraParamsRef) { return update(old, opOutput, extraParamsRef); }

    op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {}
};

/**
 * Op to check equality within arrays
 */
// Returns 0 when the pair is equal within tolerance, 1 otherwise; a reduction of 0
// therefore means "all equal".
// NOTE(review): eps is read from extraParamsRef[2] although extraParamsLen is 0 —
// the caller must supply that slot; confirm against the invoking kernels.
template<typename T> class EqualsWithEps {
public:
    static const int extraParamsLen = 0;

    op_def static T * generateExtraParams() {
        return nullptr;
    }

    op_def static void finalizeExtraParams(T *extraParamsRef) {
        //no-op
    }

    op_def static T startingValue(T *input) { return static_cast<T>(0.0f); }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParamsRef) { return reduction; }

    op_def static T op(T d1, T d2, T *extraParamsRef) {
        T eps = extraParamsRef[2];
        T diff = nd4j::math::nd4j_abs<T>(d1 - d2);

        // works well except in the range of very large numbers
        if (diff <= eps)
            return static_cast<T>(0.f);

        // Knuth approach
        // works well except in the range of very small numbers
        if (diff <= nd4j::math::nd4j_max(nd4j::math::nd4j_abs(d1), nd4j::math::nd4j_abs(d2)) * eps)
            return static_cast<T>(0.f);

        return static_cast<T>(1.f);
    }

#ifdef __CUDACC__
    __device__ static inline T opAtomic(T d1, T d2, T *extraParamsRef) {
        return op(d1, d2, extraParamsRef);
    }
#endif

    op_def static T update(T old, T opOutput, T *extraParamsRef) { return opOutput + old; }

    op_def static T merge(T old, T opOutput, T *extraParamsRef) { return update(old, opOutput, extraParamsRef); }

    op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {}
};

// Reduce3 op: Euclidean distance — sum of squared differences, sqrt at the end.
template<typename T> class EuclideanDistance {
public:
    static const int extraParamsLen = 0;

    op_def static T * generateExtraParams() {
        return nullptr;
    }

    op_def static void finalizeExtraParams(T *extraParamsRef) {
        //no-op
    }

    op_def static T startingValue(T *input) { return static_cast<T>(0.0f); }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParamsRef) { return nd4j::math::nd4j_sqrt<T>(reduction); }

    op_def static T op(T d1, T d2, T *extraParamsRef) {
        T ret = d1 - d2;
        return ret * ret;
    }

#ifdef __CUDACC__
    __device__ static inline T opAtomic(T d1, T d2, T *extraParamsRef) {
        return op(d1, d2, extraParamsRef);
    }
#endif

    op_def static T update(T old, T opOutput, T *extraParamsRef) { return opOutput + old; }

    op_def static T merge(T old, T opOutput, T *extraParamsRef) { return update(old, opOutput, extraParamsRef); }

    op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {}
};

// Reduce3 op: Manhattan (L1) distance — sum of absolute differences.
template<typename T> class ManhattanDistance {
public:
    static const int extraParamsLen = 0;

    op_def static T * generateExtraParams() {
        return nullptr;
    }

    op_def static void finalizeExtraParams(T *extraParamsRef) {
        //no-op
    }

    op_def static T startingValue(T *input) { return static_cast<T>(0.0f); }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParamsRef) { return reduction; }

    op_def static T op(T d1, T d2, T *extraParamsRef) { return nd4j::math::nd4j_abs<T>(d1 - d2); }

    op_def static T update(T old, T opOutput, T *extraParamsRef) { return old + opOutput; }

    op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {
    }

#ifdef __CUDACC__
    __device__ static inline T opAtomic(T d1, T d2, T *extraParamsRef) {
        return op(d1, d2, extraParamsRef);
    }
#endif

#ifndef __clang__
#pragma omp declare simd uniform(extraParamsRef)
#endif
    op_def static T merge(T old, T opOutput, T *extraParamsRef) { return update(old, opOutput, extraParamsRef); }
};

// Index-reduction: position of the element with the largest absolute value.
template<typename T> class IndexAbsoluteMax {
public:
#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> val, T *extraParams) {
        return nd4j::math::nd4j_abs<T>(val);
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> update(
            functions::indexreduce::IndexValue<T> old,
            functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
        opOutput.value = nd4j::math::nd4j_abs<T>(opOutput.value);
        old.value = nd4j::math::nd4j_abs<T>(old.value);
        if (opOutput.value > old.value)
            return opOutput;
#ifdef __CUDACC__
        // workaround for cuda race condition at merge phase
        else if (opOutput.value == old.value && opOutput.index < old.index)
            return opOutput;
#elif defined(__GNUC__)
#endif
        return old;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> merge(
            functions::indexreduce::IndexValue<T> f1,
            functions::indexreduce::IndexValue<T> f2, T *extraParams) {
        // NOTE(review): returns f2 when |f1.value| is strictly larger — comparison
        // looks inverted relative to update(); confirm upstream intent before changing.
        if (nd4j::math::nd4j_abs<T>(f1.value) > nd4j::math::nd4j_abs<T>(f2.value))
            return f2;
        return f1;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> postProcess(
            functions::indexreduce::IndexValue<T> reduction, int n, int xOffset, T *dx,
            int incx, T *extraParams, T *result) {
        return reduction;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline T startingValue(T *input) {
        return MIN_FLOAT;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
        functions::indexreduce::IndexValue<T> local;
        local.value = startingValue(input);
        local.index = 0;
        return local;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1, functions::indexreduce::IndexValue<T> d2, T *extraParams) {
        return d1;
    }
};

// Index-reduction: smallest index whose value satisfies the MatchCondition encoded
// in extraParams; index stays -1 until a match is seen.
template<typename T> class FirstIndex {
public:
#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> val, T *extraParams) {
        return val;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static functions::indexreduce::IndexValue<T> update(
            functions::indexreduce::IndexValue<T> old,
            functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
#ifdef __CUDACC__
        if (opOutput.index < 0)
            return old;
#endif
        T res = simdOps::MatchCondition<T>::op(opOutput.value, extraParams);
        //printf("res: %f; oldIdx: %i; newIdx: %i\n", res, old.index, opOutput.index);
        if (res == static_cast<T>(0.0f))
            return old;
        if (old.index < 0)
            return opOutput;
        if (old.index > opOutput.index)
            return opOutput;
        return old;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline T startingValue(T *input) {
        return - nd4j::DataTypeUtils::max<T>();
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
        functions::indexreduce::IndexValue<T> local;
        local.value = startingValue(input);
        local.index = -1;
        return local;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1, functions::indexreduce::IndexValue<T> d2, T *extraParams) {
        return d1;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> merge(
            functions::indexreduce::IndexValue<T> f1,
            functions::indexreduce::IndexValue<T> f2, T *extraParams) {
        if (f1.index > f2.index)
            return f2;
        return f1;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> postProcess(
            functions::indexreduce::IndexValue<T> reduction, int n, int xOffset, T *dx,
            int incx, T *extraParams, T *result) {
        return reduction;
    }
};
// Index-reduce functor: index of the LAST element matching the condition
// encoded in extraParams. Mirrors FirstIndex except the index comparison
// keeps the larger index.
template<typename T>
class LastIndex {
public:
#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> val, T *extraParams) {
        return val;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Keep the largest index whose value satisfies the match condition.
    static functions::indexreduce::IndexValue<T> update(
            functions::indexreduce::IndexValue<T> old,
            functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
#ifdef __CUDACC__
        // CUDA path: a negative index marks an uninitialized lane — ignore it.
        if (opOutput.index < 0)
            return old;
#endif
        T res = simdOps::MatchCondition<T>::op(opOutput.value, extraParams);
        if (res == static_cast<T>(0.0f))
            return old;
        if (old.index < 0)
            return opOutput;
        if (old.index < opOutput.index)
            return opOutput;
        return old;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline T startingValue(T *input) {
        return -nd4j::DataTypeUtils::max<T>();
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Identity: sentinel index -1 ("no match seen yet").
    static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
        functions::indexreduce::IndexValue<T> local;
        local.value = startingValue(input);
        local.index = -1;
        return local;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1, functions::indexreduce::IndexValue<T> d2, T *extraParams) {
        return d1;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Prefer the partial result carrying the larger index.
    static inline functions::indexreduce::IndexValue<T> merge(
            functions::indexreduce::IndexValue<T> f1,
            functions::indexreduce::IndexValue<T> f2, T *extraParams) {
        if (f1.index < f2.index)
            return f2;
        return f1;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> postProcess(
            functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
            T *dx, int incx, T *extraParams, T *result) {
        return reduction;
    }
};

// Index-reduce functor: argmax — index of the largest (signed) value.
template<typename T>
class IndexMax {
public:
#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> val, T *extraParams) {
        return val;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static functions::indexreduce::IndexValue<T> update(
            functions::indexreduce::IndexValue<T> old,
            functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
        if (opOutput.value > old.value) {
            return opOutput;
        }
#ifdef __CUDACC__
        // workaround for cuda race condition at merge phase
        else if (opOutput.value == old.value && opOutput.index < old.index)
            return opOutput;
#elif defined(__GNUC__)
#endif
        return old;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // NOTE(review): returns f2 when f1.value > f2.value — same pattern as the
    // other Max/Min functors here; preserved as-is.
    static inline functions::indexreduce::IndexValue<T> merge(
            functions::indexreduce::IndexValue<T> f1,
            functions::indexreduce::IndexValue<T> f2, T *extraParams) {
        if (f1.value > f2.value)
            return f2;
        return f1;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> postProcess(
            functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
            T *dx, int incx, T *extraParams, T *result) {
        return reduction;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Identity for max: the most negative representable value.
    static inline T startingValue(T *input) {
        return -nd4j::DataTypeUtils::max<T>();
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
        functions::indexreduce::IndexValue<T> local;
        local.value = startingValue(input);
        local.index = 0;
        return local;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1, functions::indexreduce::IndexValue<T> d2, T *extraParams) {
        return d1;
    }
};

// Index-reduce functor: index of the element with the smallest absolute
// value (class body continues past this chunk boundary).
template<typename T>
class IndexAbsoluteMin {
public:
#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(
            functions::indexreduce::IndexValue<T> val, T *extraParams) {
        return val;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Identity for min: the largest representable value.
    static inline T startingValue(T *input) {
        return nd4j::DataTypeUtils::max<T>();
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline
functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
        functions::indexreduce::IndexValue<T> local;
        local.value = startingValue(input);
        local.index = 0;
        return local;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Fold a new partial result into the running one, comparing |value|s.
    static inline functions::indexreduce::IndexValue<T> update(
            functions::indexreduce::IndexValue<T> old,
            functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
        opOutput.value = nd4j::math::nd4j_abs<T>(opOutput.value);
        old.value = nd4j::math::nd4j_abs<T>(old.value);
        if (opOutput.value < old.value)
            return opOutput;
#ifdef __CUDACC__
        // workaround for cuda race condition at merge phase
        else if (opOutput.value == old.value && opOutput.index < old.index)
            return opOutput;
#elif defined(__GNUC__)
#endif
        return old;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // NOTE(review): returns f2 when |f1| < |f2| — same pattern as the other
    // Max/Min functors in this file; preserved as-is.
    static inline functions::indexreduce::IndexValue<T> merge(
            functions::indexreduce::IndexValue<T> f1,
            functions::indexreduce::IndexValue<T> f2, T *extraParams) {
        if (nd4j::math::nd4j_abs<T>(f1.value) < nd4j::math::nd4j_abs<T>(f2.value))
            return f2;
        return f1;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> postProcess(
            functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
            T *dx, int incx, T *extraParams, T *result) {
        return reduction;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1, functions::indexreduce::IndexValue<T> d2, T *extraParams) {
        return d1;
    }
};

// Index-reduce functor: argmin — index of the smallest (signed) value.
template<typename T>
class IndexMin {
public:
#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(
            functions::indexreduce::IndexValue<T> val, T *extraParams) {
        return val;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Identity for min: the largest representable value.
    static inline T startingValue(T *input) {
        return nd4j::DataTypeUtils::max<T>();
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
        functions::indexreduce::IndexValue<T> local;
        local.value = startingValue(input);
        local.index = 0;
        return local;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> update(
            functions::indexreduce::IndexValue<T> old,
            functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
        if (opOutput.value < old.value)
            return opOutput;
#ifdef __CUDACC__
        // workaround for cuda race condition at merge phase
        else if (opOutput.value == old.value && opOutput.index < old.index)
            return opOutput;
#elif defined(__GNUC__)
#endif
        return old;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // NOTE(review): returns f2 when f1.value < f2.value — same pattern as the
    // other Max/Min functors in this file; preserved as-is.
    static inline functions::indexreduce::IndexValue<T> merge(
            functions::indexreduce::IndexValue<T> f1,
            functions::indexreduce::IndexValue<T> f2, T *extraParams) {
        if (f1.value < f2.value)
            return f2;
        return f1;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> postProcess(
            functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
            T *dx, int incx, T *extraParams, T *result) {
        return reduction;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1, functions::indexreduce::IndexValue<T> d2, T *extraParams) {
        return d1;
    }
};

// Summary-stats functor: variance, optionally bias-corrected. Falls back to
// the uncorrected variance when the corrected estimate is negative.
template<typename T>
class SummaryStatsVariance {
public:
    static _CUDA_HD inline T getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<T> val) {
        if (biasCorrected) {
            T ret = val.varianceBiasCorrected();
            if (ret < static_cast<T>(0.0f))
                return val.variance();
            return ret;
        }
        return val.variance();
    }

    static _CUDA_HD inline functions::summarystats::SummaryStatsData<T> op(functions::summarystats::SummaryStatsData<T> d1,T *extraParams) {
        return d1;
    }
};

// Summary-stats functor: standard deviation (sqrt of the variance above).
// Body continues past this chunk boundary.
template<typename T>
class SummaryStatsStandardDeviation {
public:
    static _CUDA_HD inline T getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<T> val) {
        if (biasCorrected) {
            T ret =
val.varianceBiasCorrected(); if (ret < static_cast<T>(0.0f)) return nd4j::math::nd4j_sqrt(val.variance()); else return nd4j::math::nd4j_sqrt(ret); } return nd4j::math::nd4j_sqrt(val.variance()); } static _CUDA_HD inline functions::summarystats::SummaryStatsData<T> op(functions::summarystats::SummaryStatsData<T> d1,T *extraParams) { return d1; } }; template<typename T> class DropOut { public: no_op_exec_special no_op_exec_special_cuda inline _CUDA_D static T op(T d1, T *params) { T prob = params[0]; #ifdef __CUDACC__ T length = params[1]; T tid = gridDim.x * blockDim.x + threadIdx.x; T rnd = nd4j::math::nd4j_abs<T>(nd4j::math::nd4j_cos<T>(static_cast<T>(clock64()) * static_cast<T>(tid) + static_cast<T>(length) * static_cast<T>(tid))); #else T rnd = static_cast<T>(rand() / RAND_MAX); #endif return rnd >= prob ? static_cast<T>(0.0f) : d1; } }; template<typename T> class DropOutInverted { public: no_op_exec_special no_op_exec_special_cuda #ifdef __CUDACC__ __device__ #endif inline static T op(T d1, T *params) { T prob = params[0]; #ifdef __CUDACC__ T length = params[1]; T tid = gridDim.x * blockDim.x + threadIdx.x; T rnd = nd4j::math::nd4j_abs<T>(nd4j::math::nd4j_cos<T>(static_cast<T>(clock64()) * static_cast<T>(tid) + static_cast<T>(length) * static_cast<T>(tid))); #else T rnd = static_cast<T>(rand() / RAND_MAX); #endif return rnd >= prob ? static_cast<T>(0.0f) : d1 / prob; } }; template<typename T> class ReplaceNans { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { T replacement = params[0]; return nd4j::math::nd4j_isnan(d1) ? 
replacement : d1 ; } }; // this op is used for conditional pairwise transforms only template<typename T> class CompareAndReplace{ public: no_op_exec_special no_op_exec_special_cuda // op definition for PairWise Transform op_def static T op(T d1, T d2, T *params) { T compare = params[0]; T eps = params[2]; int mode = (int) params[3]; if (mode == 0) // equals if (nd4j::math::nd4j_abs<T>(d1 - compare) <= eps) return d2; else return d1; else if (mode == 1) // not equals eps if (nd4j::math::nd4j_abs<T>(d1 - compare) > eps) return d2; else return d1; else if (mode == 2) // less_than eps if (d1 < compare) return d2; else return d1; else if (mode ==3) // greater_than if (d1 > compare) return d2; else return d1; else if (mode == 4) // less_or_equals_than if (d1 <= compare) return d2; else return d1; else if (mode == 5) // greater_or_equals_than if (d1 >= compare) return d2; else return d1; else if (mode == 6) // abs_less_than if (nd4j::math::nd4j_abs<T>(d1) < compare) return d2; else return d1; else if (mode == 7) // abs_greater_than if (nd4j::math::nd4j_abs<T>(d1) > compare) return d2; else return d1; else if (mode == 8) // is inf if (nd4j::math::nd4j_isinf(d1)) return d2; else return d1; else if (mode == 9) // is nan if (nd4j::math::nd4j_isnan(d1)) return d2; else return d1; else if (mode == 10) if (d1 == compare) return d2; else return d1; else if (mode == 11) if (d1 != compare) return d2; else return d1; else if (mode == 12) // abs_greater_or_equals_than if (nd4j::math::nd4j_abs<T>(d1) >= compare) return d2; else return d1; else if (mode == 13) // abs_less_or_equals_than if (nd4j::math::nd4j_abs<T>(d1) <= compare) return d2; else return d1; else printf("Undefined boolean operation: [%i]\n", mode); return d1; } }; template<typename T> class CompareAndSet { public: no_op_exec_special no_op_exec_special_cuda // op definition for Transform op_def static T op(T d1, T *params) { T compare = params[0]; T set = params[1]; T eps = params[2]; // with mode == 0 we do set if d1 
// (comment continued from the previous line) equals to compare, and with mode == 1 - we go otherwise
        int mode = (int) params[3];
        if (mode == 0) // equals
            if (nd4j::math::nd4j_abs<T>(d1 - compare) <= eps) return set; else return d1;
            //return nd4j::math::nd4j_abs<T>(d1 - compare) <= eps ? set : d1;
        else if (mode == 1) // not equals
            if (nd4j::math::nd4j_abs<T>(d1 - compare) > eps) return set; else return d1;
            //return nd4j::math::nd4j_abs<T>(d1 - compare) > eps ? set : d1;
        else if (mode == 2) // less_than
            if (d1 < compare) return set; else return d1;
        else if (mode ==3) // greater_than
            if (d1 > compare) return set; else return d1;
        else if (mode == 4) // less_or_equals_than
            if (d1 <= compare) return set; else return d1;
        else if (mode == 5) // greater_or_equals_than
            if (d1 >= compare) return set; else return d1;
        else if (mode == 6) // abs_less_than
            if (nd4j::math::nd4j_abs<T>(d1) < compare) return set; else return d1;
        else if (mode == 7) // abs_greater_than
            if (nd4j::math::nd4j_abs<T>(d1) > compare) return set; else return d1;
        else if (mode == 8) // is inf
            if (nd4j::math::nd4j_isinf(d1)) return set; else return d1;
        else if (mode == 9) // is nan
            if (nd4j::math::nd4j_isnan(d1)) return set; else return d1;
        else if (mode == 10)
            if (d1 == compare) return set; else return d1;
        else if (mode == 11)
            if (d1 != compare) return set; else return d1;
        else if (mode == 12) // abs_greater_or_equals_than
            if (nd4j::math::nd4j_abs<T>(d1) >= compare) return set; else return d1;
        else if (mode == 13) // abs_less_or_equals_than
            if (nd4j::math::nd4j_abs<T>(d1) <= compare) return set; else return d1;
        else
            printf("Undefined boolean operation: [%i]\n", mode);
        return d1;
    }

    // op definition for PairWise Transform
    // Pairwise variant: the condition is evaluated on d2; on a match d2 is
    // written through, otherwise d1 is kept.
    op_def static T op(T d1, T d2, T *params) {
        T compare = params[0];
        T eps = params[2];
        int mode = (int) params[3];
        if (mode == 0) // equals
            if (nd4j::math::nd4j_abs<T>(d2 - compare) <= eps) return d2; else return d1;
        else if (mode == 1) // not equals
            if (nd4j::math::nd4j_abs<T>(d2 - compare) > eps) return d2; else return d1;
        else if (mode == 2) // less_than
            if (d2 < compare) return d2; else return d1;
        else if (mode ==3) // greater_than
            if (d2 > compare) return d2; else return d1;
        else if (mode == 4) // less_or_equals_than
            if (d2 <= compare) return d2; else return d1;
        else if (mode == 5) // greater_or_equals_than
            if (d2 >= compare) return d2; else return d1;
        else if (mode == 6) // abs_less_than
            if (nd4j::math::nd4j_abs<T>(d2) < compare) return d2; else return d1;
        else if (mode == 7) // abs_greater_than
            if (nd4j::math::nd4j_abs<T>(d2) > compare) return d2; else return d1;
        else if (mode == 8) // is inf
            if (nd4j::math::nd4j_isinf(d2)) return d2; else return d1;
        else if (mode == 9) // is nan
            if (nd4j::math::nd4j_isnan(d2)) return d2; else return d1;
        else if (mode == 10)
            if (d2 == compare) return d2; else return d1;
        else if (mode == 11)
            if (d2 != compare) return d2; else return d1;
        // NOTE(review): unlike modes 0-11, modes 12/13 below test d1 rather
        // than d2 — this looks like a copy-paste slip from CompareAndReplace;
        // verify the intended operand before changing.
        else if (mode == 12) // abs_greater_or_equals_than
            if (nd4j::math::nd4j_abs<T>(d1) >= compare) return d2; else return d1;
        else if (mode == 13) // abs_less_or_equals_than
            if (nd4j::math::nd4j_abs<T>(d1) <= compare) return d2; else return d1;
        else
            printf("Undefined boolean operation: [%i]\n", mode);
        return d1;
    }
};
}   // close enclosing namespace
#endif
mxnet_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #include "./operator_tune.h" #include "../engine/openmp.h" #ifdef __CUDACC__ #include "../common/cuda_utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template<typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! 
 * \brief Get the number of blocks for cuda kernel given N
 */
inline int cuda_get_num_blocks(const int N) {
  using namespace mshadow::cuda;
  return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
}

// GPU thread count: full occupancy for the launch covering N elements.
template<>
inline int get_num_threads<gpu>(const int N) {
  using namespace mshadow::cuda;
  return kBaseThreadNum * cuda_get_num_blocks(N);
}
#endif  // __CUDACC__

// CPU thread count: delegate to the engine's OpenMP policy (N is unused).
template<>
inline int get_num_threads<cpu>(const int N) {
  return engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
}

/*! \brief operator request type switch */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \
  switch (req) { \
  case kNullOp: \
    break; \
  case kWriteInplace: \
  case kWriteTo: \
    { \
      const OpReqType ReqType = kWriteTo; \
      {__VA_ARGS__} \
    } \
    break; \
  case kAddTo: \
    { \
      const OpReqType ReqType = kAddTo; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    break; \
  }

/*! \brief operator request type switch */
#define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \
  switch (req) { \
  case kNullOp: \
    { \
      const OpReqType ReqType = kNullOp; \
      {__VA_ARGS__} \
    } \
    break; \
  case kWriteInplace: \
  case kWriteTo: \
    { \
      const OpReqType ReqType = kWriteTo; \
      {__VA_ARGS__} \
    } \
    break; \
  case kAddTo: \
    { \
      const OpReqType ReqType = kAddTo; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    break; \
  }

// Dispatch a runtime ndim (1..5) to a compile-time constant.
#define MXNET_NDIM_SWITCH(NDim, ndim, ...) \
  if (NDim == 0) { \
  } else if (NDim == 1) { \
    const int ndim = 1; \
    {__VA_ARGS__} \
  } else if (NDim == 2) { \
    const int ndim = 2; \
    {__VA_ARGS__} \
  } else if (NDim == 3) { \
    const int ndim = 3; \
    {__VA_ARGS__} \
  } else if (NDim == 4) { \
    const int ndim = 4; \
    {__VA_ARGS__} \
  } else if (NDim == 5) { \
    const int ndim = 5; \
    {__VA_ARGS__} \
  } else { \
    LOG(FATAL) << "ndim=" << NDim << "too large "; \
  }

// Extended variant of MXNET_NDIM_SWITCH covering ndim up to 10.
#define MXNET_NDIM_SWITCH_EX(NDim, ndim, ...) \
  if (NDim == 0) { \
  } else if (NDim == 1) { \
    const int ndim = 1; \
    {__VA_ARGS__} \
  } else if (NDim == 2) { \
    const int ndim = 2; \
    {__VA_ARGS__} \
  } else if (NDim == 3) { \
    const int ndim = 3; \
    {__VA_ARGS__} \
  } else if (NDim == 4) { \
    const int ndim = 4; \
    {__VA_ARGS__} \
  } else if (NDim == 5) { \
    const int ndim = 5; \
    {__VA_ARGS__} \
  } else if (NDim == 6) { \
    const int ndim = 6; \
    {__VA_ARGS__} \
  } else if (NDim == 7) { \
    const int ndim = 7; \
    {__VA_ARGS__} \
  } else if (NDim == 8) { \
    const int ndim = 8; \
    {__VA_ARGS__} \
  } else if (NDim == 9) { \
    const int ndim = 9; \
    {__VA_ARGS__} \
  } else if (NDim == 10) { \
    const int ndim = 10; \
    {__VA_ARGS__} \
  } else { \
    LOG(FATAL) << "ndim=" << NDim << "too large "; \
  }

// Type switch that rejects (u)int8 inputs with a fatal error.
#define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat16: \
    { \
      typedef mshadow::half::half_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kUint8: \
    LOG(FATAL) << "This operation does not " \
                  "support int8 or uint8"; \
    break; \
  case mshadow::kInt8: \
    LOG(FATAL) << "This operation does not " \
                  "support int8 or uint8"; \
    break; \
  case mshadow::kInt32: \
    { \
      typedef int32_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt64: \
    { \
      typedef int64_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    LOG(FATAL) << "Unknown type enum " << type; \
  }

// Type switch that rejects float16 inputs with a fatal error.
#define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat16: \
    LOG(FATAL) << "This operation does not " \
                  "support float16"; \
    break; \
  case mshadow::kUint8: \
    { \
      typedef uint8_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt8: \
    { \
      typedef int8_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt32: \
    { \
      typedef int32_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt64: \
    { \
      typedef int64_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    LOG(FATAL) << "Unknown type enum " << type; \
  }

// Maps a data type to its accumulation type (half accumulates in float).
template <typename T>
struct AccType {
  using type = T;
};

template <>
struct AccType<mshadow::half::half_t> {
  using type = float;
};

// Floating-point-only switch yielding both a data type and a (wider)
// accumulation type; integral/bool types are fatal errors.
#define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      typedef double AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      typedef double AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat16: \
    { \
      typedef mshadow::half::half_t DType; \
      typedef float AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kUint8: \
    { \
      typedef uint8_t DType; \
      typedef uint8_t AType; \
      LOG(FATAL) << "This operation only support " \
                    "floating point types not uint8"; \
    } \
    break; \
  case mshadow::kInt8: \
    { \
      typedef int8_t DType; \
      typedef int8_t AType; \
      LOG(FATAL) << "This operation only support " \
                    "floating point types not int8"; \
    } \
    break; \
  case mshadow::kInt32: \
    { \
      typedef int32_t DType; \
      typedef int32_t AType; \
      LOG(FATAL) << "This operation only support " \
                    "floating point types, not int32"; \
    } \
    break; \
  case mshadow::kInt64: \
    { \
      typedef int64_t DType; \
      typedef int64_t AType; \
      LOG(FATAL) << "This operation only support " \
                    "floating point types, not int64"; \
    } \
    break; \
  case mshadow::kBool: \
    { \
      typedef bool DType; \
      typedef int64_t AType; \
      LOG(FATAL) << "This operation only support " \
                    "floating point types, not bool"; \
    } \
    break; \
  default: \
    LOG(FATAL) << "Unknown type enum " << type; \
  }

// Switch yielding a data type plus a wider accumulation type for every
// supported dtype (integers accumulate in wider integers).
#define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      typedef double AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      typedef double AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat16: \
    { \
      typedef mshadow::half::half_t DType; \
      typedef float AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kUint8: \
    { \
      typedef uint8_t DType; \
      typedef uint32_t AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt8: \
    { \
      typedef int8_t DType; \
      typedef int32_t AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt32: \
    { \
      typedef int32_t DType; \
      typedef int64_t AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt64: \
    { \
      typedef int64_t DType; \
      typedef int64_t AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kBool: \
    { \
      typedef bool DType; \
      typedef int64_t AType; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    LOG(FATAL) << "Unknown type enum " << type; \
  }

// Integer-only type switch; floating-point types are fatal errors.
#define MXNET_INT_TYPE_SWITCH(type, DType, ...)\
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      LOG(FATAL) << "This operation only support " \
                    "integer types, not float32"; \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      LOG(FATAL) << "This operation only support " \
                    "integer types, not float64"; \
    } \
    break; \
  case mshadow::kFloat16: \
    { \
      typedef mshadow::half::half_t DType; \
      LOG(FATAL) << "This operation only support " \
                    "integer types, not float16"; \
    } \
    break; \
  case mshadow::kUint8: \
    { \
      typedef uint8_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt8: \
    { \
      typedef int8_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt32: \
    { \
      typedef int32_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt64: \
    { \
      typedef int64_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    LOG(FATAL) << "Unknown type enum " << type; \
  }

// Narrow switch for the types supported by data loading.
#define MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat16: \
    { \
      typedef mshadow::half::half_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kUint8: \
    { \
      typedef uint8_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    LOG(FATAL) << "Invalid loading enum type " << type; \
  }

/*!
 * \brief assign the val to out according
 * to request in Kernel::Launch
 * \param out the data to be assigned
 * \param req the assignment request
 * \param val the value to be assigned to out
 * \tparam OType output type
 * \tparam VType value type
 */
#define KERNEL_ASSIGN(out, req, val) \
  { \
    switch (req) { \
    case kNullOp: \
      break; \
    case kWriteTo: \
    case kWriteInplace: \
      (out) = (val); \
      break; \
    case kAddTo: \
      (out) += (val); \
      break; \
    default: \
      break; \
    } \
  }

#define MXNET_ADD_ALL_TYPES \
  .add_enum("float32", mshadow::kFloat32) \
  .add_enum("float64", mshadow::kFloat64) \
  .add_enum("float16", mshadow::kFloat16) \
  .add_enum("uint8", mshadow::kUint8) \
  .add_enum("int8", mshadow::kInt8) \
  .add_enum("int32", mshadow::kInt32) \
  .add_enum("int64", mshadow::kInt64)

#define MXNET_ADD_ALL_TYPES_WITH_BOOL \
  .add_enum("float32", mshadow::kFloat32) \
  .add_enum("float64", mshadow::kFloat64) \
  .add_enum("float16", mshadow::kFloat16) \
  .add_enum("uint8", mshadow::kUint8) \
  .add_enum("int8", mshadow::kInt8) \
  .add_enum("int32", mshadow::kInt32) \
  .add_enum("int64", mshadow::kInt64) \
  .add_enum("bool", mshadow::kBool)

/* \brief Compute flattened index given coordinates and shape.
 */
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    // (shape[i] > coord[i]) masks out coordinates on broadcast (size-1) axes.
    ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
  }
  return ret;
}

/* Compute coordinates from flattened index given shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  Shape<ndim> ret;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret[i] = j - tmp*shape[i];
    j = tmp;
  }
  return ret;
}

/* Compute dot product of two vector */
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret += coord[i] * stride[i];
  }
  return ret;
}

/* Combining unravel and dot */
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
  const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret += (j - tmp*shape[i])*stride[i];
    j = tmp;
  }
  return ret;
}

/* Calculate stride of each dim from shape */
// Broadcast (size-1) axes get stride 0 so reads repeat along them.
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t cumprod = 1;
  #pragma unroll
  for (int i = ndim - 1; i >= 0; --i) {
    stride[i] = (shape[i] > 1) ? cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}

/* Increment coordinates and modify index */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx, const Shape<ndim>& stride) {
  ++(*coord)[ndim-1];
  *idx += stride[ndim-1];
  // Carry-propagate through higher dimensions when a coordinate overflows.
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx = *idx + stride[i-1] - shape[i] * stride[i];
  }
}

/* Increment coordinates and modify index */
// Two-index variant: advances a pair of flattened indices in lockstep.
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx1, const Shape<ndim>& stride1,
                         index_t* idx2, const Shape<ndim>& stride2) {
  ++(*coord)[ndim-1];
  *idx1 += stride1[ndim-1];
  *idx2 += stride2[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
  }
}

/*!
 * \brief Simple copy data from one blob to another
 * \param to Destination blob
 * \param from Source blob
 */
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
    } else {
      // Dtype mismatch: cast element-wise while copying.
      MSHADOW_TYPE_SWITCH_WITH_BOOL(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}

/*! \brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
  /* \brief Backward calc with grad
   * \param a - output grad
   * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
   * \return input grad
   */
  template<typename DType, typename ...Args>
  MSHADOW_XINLINE static DType Map(DType a, Args...
args) {
    // Chain rule: output grad times the op's local gradient.
    return DType(a * GRAD_OP::Map(args...));
  }
};

/*! \brief Binary op backward gradient OP wrapper (tuned) */
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
  using backward_grad<GRAD_OP>::Map;
};

/*! \brief Select assignment operation based upon the req value
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
 */
template<typename OP, int req>
struct op_with_req {
  typedef OP Operation;

  /*! \brief input is one tensor */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is tensor and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }

  /*! \brief input is tensor and two scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in,
                                  const DType value_1, const DType value_2) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
  }

  /*! \brief No inputs (ie fill to constant value) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    KERNEL_ASSIGN(out[i], req, OP::Map());
  }

  /*! \brief input is single scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(value));
  }

  /*! \brief inputs are two tensors and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
                                  const DType *input_1, const DType *input_2, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
  }

  /*! \brief inputs are three tensors (ie backward grad with binary grad function) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
                                  const DType *input_1,
                                  const DType *input_2,
                                  const DType *input_3) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
  }

  // bool-output overloads; SFINAE keeps them from clashing with the DType
  // overloads above when DType itself is bool.
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors with a boolean output tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is tensor and two scalar value with a boolean output tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
};

// Per-device kernel launcher; specialized for cpu below (gpu elsewhere).
template<typename OP, typename xpu>
struct Kernel;

/*!
 * \brief CPU Kernel launcher
 * \tparam OP Operator to launch
 */
template<typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Launch a generic CPU kernel.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args...
args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2) { for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); ++i) { OP::Map(i, args...); } } #else for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif return true; } /*! * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended * for irregular workloads such as spmv. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function */ template<typename ...Args> inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false); if (omp_threads < 2) { for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) schedule(dynamic) for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } } #else for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif return true; } /*! * \brief Launch CPU kernel which has OMP tuning data available. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam PRIMITIVE_OP The primitive operation to use for tuning * \tparam DType Data type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param dest Destination pointer (used to infer DType) * \param args Varargs to eventually pass to the OP::Map() function */ template<typename PRIMITIVE_OP, typename DType, typename ...Args> static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... 
args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP( N, static_cast<size_t>(omp_threads))) { for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); ++i) { OP::Map(i, args...); } } #else for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif } /*! * \brief Launch custom-tuned kernel where each thread is set to * operate on a contiguous partition * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions */ template<typename ...Args> inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2) { OP::Map(0, N, args...); } else { const auto length = (N + omp_threads - 1) / omp_threads; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); i += length) { OP::Map(i, i + length > N ? N - i : length, args...); } } #else OP::Map(0, N, args...); #endif } /*! * \brief Launch a tunable OP with implicitly-supplied data type * \tparam DType Data type * \tparam T OP type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<T, DType>(s, N, dest, args...); return true; } /*! 
* \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req) * \tparam DType Data type * \tparam T Wrapper type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<typename T::Operation, DType>(s, N, dest, args...); return true; } }; #ifdef __CUDACC__ template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, args...); } } template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel_ex(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, 1, args...); } } template<typename OP> struct Kernel<OP, gpu> { /*! \brief Launch GPU kernel */ template<typename ...Args> inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) { if (0 == N) return; using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel); } template<typename ...Args> inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... 
args) { if (0 == N) return; using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel_ex<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex); } }; #endif // __CUDACC__ /*! * \brief Set to immediate scalar value kernel * \tparam val Scalar immediate */ template<int val> struct set_to_int : public tunable { // mxnet_op version (when used directly with Kernel<>::Launch()) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { out[i] = DType(val); } // mshadow_op version (when used with op_with_req<>) MSHADOW_XINLINE static int Map() { return val; } }; /*! * \brief Special-case kernel shortcut for setting to zero and one */ using set_zero = set_to_int<0>; using set_one = set_to_int<1>; } // namespace mxnet_op } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_MXNET_OP_H_
pr35751.c
/* PR c/35751 */
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

/* Regression test for PR c/35751: variably modified array declarations with
   linkage ('extern'/'static') must be diagnosed, and referencing the
   erroneous declarations from OpenMP parallel regions under various
   data-sharing clauses must not crash the compiler.  The dg-error comments
   below are the expected diagnostics and must stay on the same line as the
   declaration they check.  */

void
foo (int i)
{
  extern int a[i]; /* { dg-error "must have no linkage|storage size of" } */
  static int b[i]; /* { dg-error "storage size of" } */

  /* Default data sharing.  */
  #pragma omp parallel
  {
    a[0] = 0;
    b[0] = 0;
  }
  /* Explicitly shared.  */
  #pragma omp parallel shared (a, b)
  {
    a[0] = 0;
    b[0] = 0;
  }
  /* Per-thread private copies.  */
  #pragma omp parallel private (a, b)
  {
    a[0] = 0;
    b[0] = 0;
  }
  /* Private copies initialized from the original.  */
  #pragma omp parallel firstprivate (a, b)
  {
    a[0] = 0;
    b[0] = 0;
  }
}
GB_unop__sinh_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB (_unop_apply__sinh_fp32_fp32)
// op(A') function: GB (_unop_tran__sinh_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = sinhf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = sinhf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = sinhf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SINH || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__sinh_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // no bitmap: all anz entries of Ax are present; apply sinhf to each
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = sinhf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = sinhf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__sinh_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose kernel body is specialized via the macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
layerramsubset.h
/********************************************************************************* * * Inviwo - Interactive Visualization Workshop * * Copyright (c) 2018-2020 Inviwo Foundation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 *********************************************************************************/

#pragma once

#include <modules/base/basemoduledefine.h>
#include <inviwo/core/datastructures/image/layer.h>
#include <inviwo/core/datastructures/image/layerram.h>
#include <inviwo/core/datastructures/image/layerramprecision.h>
#include <inviwo/core/util/glm.h>

#include <algorithm>

namespace inviwo {

namespace util {

/**
 * \brief extracts a subregion from a layer and returns it as a new layer
 *
 * This function extracts a subregion given by offset and extent from the input layer.
 * If border clamping is enabled, the output region will be clamped to lie completely within the
 * source layer. Otherwise (default), the areas outside the source layer will be filled with
 * zeros.
 *
 * @param in input layer
 * @param offset subregion offset in input layer
 * @param extent extent (width and height) of subregion
 * @param clampBorderOutsideImage if true, the output region is clamped to the layer boundaries
 *
 * @return std::shared_ptr<LayerRAM>
 */
IVW_MODULE_BASE_API std::shared_ptr<LayerRAM> layerSubSet(const Layer* in, ivec2 offset,
                                                          size2_t extent,
                                                          bool clampBorderOutsideImage = false);

/**
 * \brief extracts a subregion from a layer and converts it into a new layer
 *
 * This function extracts a subregion given by offset and extent from the input layer. The values
 * will be converted to type T using util::glm_convert_normalized.
 * If border clamping is enabled, the output region will be clamped to lie completely within the
 * source layer. Otherwise (default), the areas outside the source layer will be filled with
 * zeros.
 *
 * @param in input layer
 * @param offset subregion offset in input layer
 * @param extent extent (width and height) of subregion
 * @param clampBorderOutsideImage if true, the output region is clamped to the layer boundaries
 *
 * @return std::shared_ptr<LayerRAMPrecision<T>>
 */
template <typename T>
std::shared_ptr<LayerRAMPrecision<T>> layerSubSet(const Layer* in, ivec2 offset, size2_t extent,
                                                  bool clampBorderOutsideImage = false);

namespace detail {

// Same-type row copy: plain element-wise std::copy, no value conversion.
template <typename T>
void conversionCopy(const T* src, T* dst, size_t len) {
    std::copy(src, src + len, dst);
}

// Cross-type row copy: convert each element with util::glm_convert_normalized.
template <typename To, typename From>
void conversionCopy(const From* src, To* dst, size_t len) {
    for (size_t i = 0; i < len; i++) {
        dst[i] = util::glm_convert_normalized<To, From>(src[i]);
    }
}

// Core implementation: copies the intersection of the requested region and the
// source layer, row by row, into a newly allocated LayerRAMPrecision<U>.
// With clampBorderOutsideImage the output shrinks to that intersection;
// otherwise the output keeps the requested extent and is zero-filled first.
template <typename T, typename U = T>
std::shared_ptr<LayerRAMPrecision<U>> extractLayerSubSet(const LayerRAMPrecision<T>* inLayer,
                                                         ivec2 offset, size2_t extent,
                                                         bool clampBorderOutsideImage) {
    // determine parameters
    const ivec2 srcDim(inLayer->getDimensions());

    // adjust the output dimensions to match the intersection of output and input regions
    const ivec2 srcOffset(glm::max(ivec2(0), offset));
    // a negative offset shifts where the copied data lands inside the output
    const ivec2 dstOffset = clampBorderOutsideImage ? ivec2(0) : (glm::max(ivec2(0), -offset));
    // clamp copy extent to source layer
    const ivec2 copyExtent = glm::min(ivec2(extent) - dstOffset, srcDim - srcOffset);
    const ivec2 dstDim = clampBorderOutsideImage ? copyExtent : ivec2(extent);

    // allocate space
    auto newLayer = std::make_shared<LayerRAMPrecision<U>>(dstDim);

    const auto src = inLayer->getDataTyped();
    auto dst = newLayer->getDataTyped();
    if (!clampBorderOutsideImage) {
        // clear entire layer as only parts will be copied
        std::fill(dst, dst + dstDim.x * dstDim.y, U(0));
    }

    // memcpy each row to form sub layer
#ifdef IVW_USE_OPENMP
#pragma omp parallel for
#endif
    for (int j = 0; j < copyExtent.y; j++) {
        size_t srcPos = (j + srcOffset.y) * srcDim.x + srcOffset.x;
        size_t dstPos = (j + dstOffset.y) * dstDim.x + dstOffset.x;
        conversionCopy(src + srcPos, dst + dstPos, static_cast<size_t>(copyExtent.x));
    }
    return newLayer;
}

}  // namespace detail

}  // namespace util

template <typename T>
std::shared_ptr<LayerRAMPrecision<T>> util::layerSubSet(const Layer* in, ivec2 offset,
                                                        size2_t extent,
                                                        bool clampBorderOutsideImage) {
    // dispatch on the concrete pixel type of the layer's RAM representation
    return in->getRepresentation<LayerRAM>()->dispatch<std::shared_ptr<LayerRAMPrecision<T>>>(
        [offset, extent, clampBorderOutsideImage](auto layerpr) {
            using ValueType = util::PrecisionValueType<decltype(layerpr)>;
            return util::detail::extractLayerSubSet<ValueType, T>(layerpr, offset, extent,
                                                                  clampBorderOutsideImage);
        });
}

}  // namespace inviwo
quantize.h
// Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef MACE_UTILS_QUANTIZE_H_
#define MACE_UTILS_QUANTIZE_H_

#include <limits>
#include <algorithm>

namespace mace {

// Compute an affine quantization (scale, zero_point) mapping the float range
// [in_min_data, in_max_data] onto the full integer range of T.  The range is
// first widened to include 0.0f so that float zero maps exactly to an
// integer.  If non_zero is set, in_min_data is additionally nudged so it
// never quantizes to the same integer as zero.
// NOTE(review): MACE_ASSERT / MACE_CHECK are project macros declared
// elsewhere, not in this header.
template<typename T>
inline void AdjustRange(const float in_min_data, const float in_max_data,
                        const bool non_zero,
                        float *scale, int32_t *zero_point) {
  // re-range to make range include zero float and
  // make zero float as integer u8
  const T quantized_min = std::numeric_limits<T>::lowest();
  const T quantized_max = std::numeric_limits<T>::max();
  if (quantized_min < 0) {
    MACE_ASSERT(!non_zero, "Cannot nudge to non_zero quantize value.");
  }

  float out_max = std::max(0.f, in_max_data);
  float out_min = std::min(0.f, in_min_data);
  // make in_min_data quantize as greater than 1
  if (non_zero) {
    out_min = std::min(out_min,
                       in_min_data - (out_max - in_min_data)
                           / (quantized_max - quantized_min - 1));
  }

  *scale = (out_max - out_min) / (quantized_max - quantized_min);
  const float kEps = 1e-6;
  if (out_min < -kEps && out_max > kEps) {
    // zero lies strictly inside the range: snap it to the nearest integer,
    // adjusting scale if the exact zero is not representable
    float quantized_zero = -out_min / *scale;
    int32_t quantized_zero_near_int =
        static_cast<int32_t>(roundf(quantized_zero));
    *zero_point = quantized_zero_near_int;
    if (fabs(quantized_zero - quantized_zero_near_int) > kEps) {
      if (quantized_zero < quantized_zero_near_int || non_zero) {
        // keep out_max fixed, and move out_min
        *zero_point = static_cast<int32_t>(std::ceil(quantized_zero));
        *scale = out_max / (quantized_max - *zero_point);
      } else {
        // keep out_min fixed, and move out_max
        *scale = out_min / (quantized_min - *zero_point);
      }
    }
  } else if (out_min > -kEps) {
    // non-negative range: zero maps to the lowest representable integer
    *zero_point = quantized_min;
  } else {
    // non-positive range: zero maps to the highest representable integer
    *zero_point = quantized_max;
  }
}

// Clamp a float to T's representable range.  Conversion truncates toward
// zero; callers in this header round with roundf before calling.
template<typename T>
inline T Saturate(float value) {
  int rounded_value = static_cast<int>(value);
  if (rounded_value <= std::numeric_limits<T>::lowest()) {
    return std::numeric_limits<T>::lowest();
  } else if (rounded_value >= std::numeric_limits<T>::max()) {
    return std::numeric_limits<T>::max();
  } else {
    return static_cast<T>(rounded_value);
  }
}

// Scan input[0..size) for its minimum and maximum values.
// (index_t is a project-wide integer typedef, not declared in this header.)
inline void FindMinMax(const float *input,
                       const index_t size,
                       float *min_val, float *max_val) {
  float max_v = std::numeric_limits<float>::lowest();
  float min_v = std::numeric_limits<float>::max();
  for (index_t i = 0; i < size; ++i) {
    max_v = std::max(max_v, input[i]);
    min_v = std::min(min_v, input[i]);
  }
  *min_val = min_v;
  *max_val = max_v;
}

// Quantize with precomputed parameters:
// output[i] = saturate(round(zero_point + input[i] / scale)).
template<typename T>
inline void QuantizeWithScaleAndZeropoint(const float *input,
                                          const index_t size,
                                          float scale,
                                          int32_t zero_point,
                                          T *output) {
  float recip_scale = 1 / scale;  // hoist the division out of the loop
#pragma omp parallel for
  for (int i = 0; i < size; ++i) {
    output[i] = Saturate<T>(roundf(zero_point + recip_scale * input[i]));
  }
}

// Full quantization pipeline: derive (scale, zero_point) from the data's
// observed min/max via AdjustRange, then quantize.  The chosen scale and
// zero_point are returned to the caller through the out-parameters.
template<typename T>
inline void Quantize(const float *input,
                     const index_t size,
                     bool non_zero,
                     T *output,
                     float *scale,
                     int32_t *zero_point) {
  float in_min_data;
  float in_max_data;
  FindMinMax(input, size, &in_min_data, &in_max_data);

  AdjustRange<T>(in_min_data, in_max_data, non_zero, scale, zero_point);
  QuantizeWithScaleAndZeropoint(input, size, *scale, *zero_point, output);
}

// Inverse mapping: output[i] = scale * (input[i] - zero_point).
template<typename T>
inline void Dequantize(const T *input,
                       const index_t size,
                       const float scale,
                       const int32_t zero_point,
                       float *output) {
#pragma omp parallel for
  for (int i = 0; i < size; ++i) {
    output[i] = scale * (input[i] - zero_point);
  }
}

// Decompose a real multiplier into a Q31 fixed-point integer multiplier and
// a binary exponent: multiplier ~= output_multiplier * 2^shift / 2^31.
// Used by integer-only inference kernels.
inline void QuantizeMultiplier(double multiplier,
                               int32_t* output_multiplier,
                               int32_t* shift) {
  if (multiplier == 0.f) {
    *output_multiplier = 0;
    *shift = 0;
    return;
  }
  const double q = std::frexp(multiplier, shift);
  auto qint = static_cast<int64_t>(roundl(q * (1ll << 31)));
  if (qint == (1ll << 31)) {
    // rounding overflowed the 31-bit mantissa; renormalize
    qint /= 2;
    ++*shift;
  }
  *output_multiplier = static_cast<int32_t>(qint);
  MACE_CHECK(*output_multiplier <= std::numeric_limits<int32_t>::max());
}

// Derive the requantization multiplier and right shift for integer matmul:
// real_multiplier = lhs_scale * rhs_scale / output_scale, required in (0, 1)
// so that the resulting shift is non-negative.
inline void GetOutputMultiplierAndShift(
    const float lhs_scale, const float rhs_scale, const float output_scale,
    int32_t *quantized_multiplier, int *right_shift) {
  float real_multiplier = lhs_scale * rhs_scale / output_scale;
  MACE_CHECK(real_multiplier > 0.f && real_multiplier < 1.f, real_multiplier);

  int exponent;
  // NOTE(review): passes int* where QuantizeMultiplier declares int32_t*;
  // only valid where int32_t is a typedef of int -- confirm on all targets.
  QuantizeMultiplier(real_multiplier, quantized_multiplier, &exponent);
  *right_shift = -exponent;
  MACE_CHECK(*right_shift >= 0);
}

}  // namespace mace

#endif  // MACE_UTILS_QUANTIZE_H_
launch_latency.c
#include <stdio.h> #include <time.h> #include <omp.h> #define MAX_TEAMS 2048 #define TRIALS (1000) int n =1024; int main(void) { struct timespec t0,t1,t2; int fail = 0; int a = -1; // clock_gettime(CLOCK_REALTIME, &t0); #pragma omp target { //nothing } clock_gettime(CLOCK_REALTIME, &t1); double m = (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec)/1e9; fprintf(stderr, "1st kernel Time %12.8f\n", m); for (int j = 1; j <= MAX_TEAMS; j = j<<1) { clock_gettime(CLOCK_REALTIME, &t1); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams distribute num_teams(j) thread_limit(1024) for (int k =0; k < n; k++) { // nothing } } clock_gettime(CLOCK_REALTIME, &t2); double t = (t2.tv_sec - t1.tv_sec) + (t2.tv_nsec - t1.tv_nsec)/1e9; fprintf(stderr, "avg kernel Time %12.8f TEAMS=%d\n", t/TRIALS, j); } printf("Succeeded\n"); return fail; }
pocketfft_hdronly.h
/* This file is part of pocketfft. Copyright (C) 2010-2019 Max-Planck-Society Author: Martin Reinecke All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef POCKETFFT_HDRONLY_H #define POCKETFFT_HDRONLY_H #ifndef __cplusplus #error This file is C++ and requires a C++ compiler. #endif #if !(__cplusplus >= 201103L || _MSVC_LANG+0L >= 201103L) #error This file requires at least C++11 support. 
#endif #ifndef POCKETFFT_CACHE_SIZE #define POCKETFFT_CACHE_SIZE 16 #endif #include <cmath> #include <cstring> #include <cstdlib> #include <stdexcept> #include <memory> #include <vector> #include <complex> #if POCKETFFT_CACHE_SIZE!=0 #include <array> #include <mutex> #endif #ifdef POCKETFFT_OPENMP #include <omp.h> #endif #if defined(__GNUC__) #define POCKETFFT_NOINLINE __attribute__((noinline)) #define POCKETFFT_RESTRICT __restrict__ #elif defined(_MSC_VER) #define POCKETFFT_NOINLINE __declspec(noinline) #define POCKETFFT_RESTRICT __restrict #else #define POCKETFFT_NOINLINE #define POCKETFFT_RESTRICT #endif namespace pocketfft { namespace detail { using namespace std; using shape_t = vector<size_t>; using stride_t = vector<ptrdiff_t>; constexpr bool FORWARD = true, BACKWARD = false; // only enable vector support for gcc>=5.0 and clang>=5.0 #ifndef POCKETFFT_NO_VECTORS #define POCKETFFT_NO_VECTORS #if defined(__INTEL_COMPILER) // do nothing. This is necessary because this compiler also sets __GNUC__. 
#elif defined(__clang__) #if __clang__>=5 #undef POCKETFFT_NO_VECTORS #endif #elif defined(__GNUC__) #if __GNUC__>=5 #undef POCKETFFT_NO_VECTORS #endif #endif #endif template<typename T> struct VLEN { static constexpr size_t val=1; }; #ifndef POCKETFFT_NO_VECTORS #if (defined(__AVX512F__)) template<> struct VLEN<float> { static constexpr size_t val=16; }; template<> struct VLEN<double> { static constexpr size_t val=8; }; #elif (defined(__AVX__)) template<> struct VLEN<float> { static constexpr size_t val=8; }; template<> struct VLEN<double> { static constexpr size_t val=4; }; #elif (defined(__SSE2__)) template<> struct VLEN<float> { static constexpr size_t val=4; }; template<> struct VLEN<double> { static constexpr size_t val=2; }; #elif (defined(__VSX__)) template<> struct VLEN<float> { static constexpr size_t val=4; }; template<> struct VLEN<double> { static constexpr size_t val=2; }; #else #define POCKETFFT_NO_VECTORS #endif #endif template<typename T> class arr { private: T *p; size_t sz; #if defined(POCKETFFT_NO_VECTORS) static T *ralloc(size_t num) { if (num==0) return nullptr; void *res = malloc(num*sizeof(T)); if (!res) throw bad_alloc(); return reinterpret_cast<T *>(res); } static void dealloc(T *ptr) { free(ptr); } #elif __cplusplus >= 201703L static T *ralloc(size_t num) { if (num==0) return nullptr; void *res = aligned_alloc(64,num*sizeof(T)); if (!res) throw bad_alloc(); return reinterpret_cast<T *>(res); } static void dealloc(T *ptr) { free(ptr); } #else // portable emulation static T *ralloc(size_t num) { if (num==0) return nullptr; void *ptr = malloc(num*sizeof(T)+64); if (!ptr) throw bad_alloc(); T *res = reinterpret_cast<T *> ((reinterpret_cast<size_t>(ptr) & ~(size_t(63))) + 64); (reinterpret_cast<void**>(res))[-1] = ptr; return res; } static void dealloc(T *ptr) { if (ptr) free((reinterpret_cast<void**>(ptr))[-1]); } #endif public: arr() : p(0), sz(0) {} arr(size_t n) : p(ralloc(n)), sz(n) {} arr(arr &&other) : p(other.p), sz(other.sz) { 
other.p=nullptr; other.sz=0; } ~arr() { dealloc(p); } void resize(size_t n) { if (n==sz) return; dealloc(p); p = ralloc(n); sz = n; } T &operator[](size_t idx) { return p[idx]; } const T &operator[](size_t idx) const { return p[idx]; } T *data() { return p; } const T *data() const { return p; } size_t size() const { return sz; } }; template<typename T> struct cmplx { T r, i; cmplx() {} cmplx(T r_, T i_) : r(r_), i(i_) {} void Set(T r_, T i_) { r=r_; i=i_; } void Set(T r_) { r=r_; i=T(0); } cmplx &operator+= (const cmplx &other) { r+=other.r; i+=other.i; return *this; } template<typename T2>cmplx &operator*= (T2 other) { r*=other; i*=other; return *this; } cmplx operator+ (const cmplx &other) const { return cmplx(r+other.r, i+other.i); } cmplx operator- (const cmplx &other) const { return cmplx(r-other.r, i-other.i); } template<typename T2> auto operator* (const T2 &other) const -> cmplx<decltype(r*other)> { return {r*other, i*other}; } template<typename T2> auto operator* (const cmplx<T2> &other) const -> cmplx<decltype(r+other.r)> { return {r*other.r-i*other.i, r*other.i + i*other.r}; } template<bool fwd, typename T2> auto special_mul (const cmplx<T2> &other) const -> cmplx<decltype(r+other.r)> { using Tres = cmplx<decltype(r+other.r)>; return fwd ? Tres(r*other.r+i*other.i, i*other.r-r*other.i) : Tres(r*other.r-i*other.i, r*other.i+i*other.r); } }; template<typename T> void PMC(cmplx<T> &a, cmplx<T> &b, const cmplx<T> &c, const cmplx<T> &d) { a = c+d; b = c-d; } template<typename T> cmplx<T> conj(const cmplx<T> &a) { return {a.r, -a.i}; } template<typename T> void ROT90(cmplx<T> &a) { auto tmp_=a.r; a.r=-a.i; a.i=tmp_; } template<bool fwd, typename T> void ROTX90(cmplx<T> &a) { auto tmp_= fwd ? -a.r : a.r; a.r = fwd ? 
a.i : -a.i; a.i=tmp_; } // // twiddle factor section // template<typename T> class sincos_2pibyn { private: using Thigh = typename conditional<(sizeof(T)>sizeof(double)), T, double>::type; arr<T> data; void my_sincosm1pi (Thigh a_, Thigh *POCKETFFT_RESTRICT res) { if (sizeof(Thigh)>sizeof(double)) // don't have the code for long double { constexpr Thigh pi = Thigh(3.141592653589793238462643383279502884197L); auto s = sin(pi*a_); res[1] = s; res[0] = (s*s)/(-sqrt((1-s)*(1+s))-1); return; } // adapted from https://stackoverflow.com/questions/42792939/ // CAUTION: this function only works for arguments in the range // [-0.25; 0.25]! double a = double(a_); double s = a * a; /* Approximate cos(pi*x)-1 for x in [-0.25,0.25] */ double r = -1.0369917389758117e-4; r = fma (r, s, 1.9294935641298806e-3); r = fma (r, s, -2.5806887942825395e-2); r = fma (r, s, 2.3533063028328211e-1); r = fma (r, s, -1.3352627688538006e+0); r = fma (r, s, 4.0587121264167623e+0); r = fma (r, s, -4.9348022005446790e+0); double c = r*s; /* Approximate sin(pi*x) for x in [-0.25,0.25] */ r = 4.6151442520157035e-4; r = fma (r, s, -7.3700183130883555e-3); r = fma (r, s, 8.2145868949323936e-2); r = fma (r, s, -5.9926452893214921e-1); r = fma (r, s, 2.5501640398732688e+0); r = fma (r, s, -5.1677127800499516e+0); s = s * a; r = r * s; s = fma (a, 3.1415926535897931e+0, r); res[0] = c; res[1] = s; } POCKETFFT_NOINLINE void calc_first_octant(size_t den, T * POCKETFFT_RESTRICT res) { size_t n = (den+4)>>3; if (n==0) return; res[0]=1.; res[1]=0.; if (n==1) return; size_t l1 = size_t(sqrt(n)); arr<Thigh> tmp(2*l1); for (size_t i=1; i<l1; ++i) { my_sincosm1pi(Thigh(2*i)/Thigh(den),&tmp[2*i]); res[2*i ] = T(tmp[2*i]+1); res[2*i+1] = T(tmp[2*i+1]); } size_t start=l1; while(start<n) { Thigh cs[2]; my_sincosm1pi((Thigh(2*start))/Thigh(den),cs); res[2*start] = T(cs[0]+1); res[2*start+1] = T(cs[1]); size_t end = l1; if (start+end>n) end = n-start; for (size_t i=1; i<end; ++i) { Thigh csx[2]={tmp[2*i], tmp[2*i+1]}; 
res[2*(start+i)] = T(((cs[0]*csx[0] - cs[1]*csx[1] + cs[0]) + csx[0]) + 1); res[2*(start+i)+1] = T((cs[0]*csx[1] + cs[1]*csx[0]) + cs[1] + csx[1]); } start += l1; } } void calc_first_quadrant(size_t n, T * POCKETFFT_RESTRICT res) { T * POCKETFFT_RESTRICT p = res+n; calc_first_octant(n<<1, p); size_t ndone=(n+2)>>2; size_t i=0, idx1=0, idx2=2*ndone-2; for (; i+1<ndone; i+=2, idx1+=2, idx2-=2) { res[idx1] = p[2*i ]; res[idx1+1] = p[2*i+1]; res[idx2] = p[2*i+3]; res[idx2+1] = p[2*i+2]; } if (i!=ndone) { res[idx1] = p[2*i]; res[idx1+1] = p[2*i+1]; } } void calc_first_half(size_t n, T * POCKETFFT_RESTRICT res) { int ndone=int(n+1)>>1; T * p = res+n-1; calc_first_octant(n<<2, p); int i4=0, in=int(n), i=0; for (; i4<=in-i4; ++i, i4+=4) // octant 0 { res[2*i] = p[2*i4]; res[2*i+1] = p[2*i4+1]; } for (; i4-in <= 0; ++i, i4+=4) // octant 1 { auto xm = in-i4; res[2*i] = p[2*xm+1]; res[2*i+1] = p[2*xm]; } for (; i4<=3*in-i4; ++i, i4+=4) // octant 2 { auto xm = i4-in; res[2*i] = -p[2*xm+1]; res[2*i+1] = p[2*xm]; } for (; i<ndone; ++i, i4+=4) // octant 3 { auto xm = 2*in-i4; res[2*i] = -p[2*xm]; res[2*i+1] = p[2*xm+1]; } } void fill_first_quadrant(size_t n, T * POCKETFFT_RESTRICT res) { constexpr T hsqt2 = T(0.707106781186547524400844362104849L); size_t quart = n>>2; if ((n&7)==0) res[quart] = res[quart+1] = hsqt2; for (size_t i=2, j=2*quart-2; i<quart; i+=2, j-=2) { res[j] = res[i+1]; res[j+1] = res[i]; } } POCKETFFT_NOINLINE void fill_first_half(size_t n, T * POCKETFFT_RESTRICT res) { size_t half = n>>1; if ((n&3)==0) for (size_t i=0; i<half; i+=2) { res[i+half] = -res[i+1]; res[i+half+1] = res[i]; } else for (size_t i=2, j=2*half-2; i<half; i+=2, j-=2) { res[j] = -res[i]; res[j+1] = res[i+1]; } } void fill_second_half(size_t n, T * POCKETFFT_RESTRICT res) { if ((n&1)==0) for (size_t i=0; i<n; ++i) res[i+n] = -res[i]; else for (size_t i=2, j=2*n-2; i<n; i+=2, j-=2) { res[j] = res[i]; res[j+1] = -res[i+1]; } } POCKETFFT_NOINLINE void sincos_2pibyn_half(size_t n, T * 
POCKETFFT_RESTRICT res) { if ((n&3)==0) { calc_first_octant(n, res); fill_first_quadrant(n, res); fill_first_half(n, res); } else if ((n&1)==0) { calc_first_quadrant(n, res); fill_first_half(n, res); } else calc_first_half(n, res); } public: POCKETFFT_NOINLINE sincos_2pibyn(size_t n, bool half) : data(2*n) { sincos_2pibyn_half(n, data.data()); if (!half) fill_second_half(n, data.data()); } T operator[](size_t idx) const { return data[idx]; } const T *rdata() const { return data; } const cmplx<T> *cdata() const { return reinterpret_cast<const cmplx<T> *>(data.data()); } }; struct util // hack to avoid duplicate symbols { static POCKETFFT_NOINLINE size_t largest_prime_factor (size_t n) { size_t res=1; while ((n&1)==0) { res=2; n>>=1; } for (size_t x=3; x*x<=n; x+=2) while ((n%x)==0) { res=x; n/=x; } if (n>1) res=n; return res; } static POCKETFFT_NOINLINE double cost_guess (size_t n) { constexpr double lfp=1.1; // penalty for non-hardcoded larger factors size_t ni=n; double result=0.; while ((n&1)==0) { result+=2; n>>=1; } for (size_t x=3; x*x<=n; x+=2) while ((n%x)==0) { result+= (x<=5) ? double(x) : lfp*double(x); // penalize larger prime factors n/=x; } if (n>1) result+=(n<=5) ? 
double(n) : lfp*double(n);
    return result*double(ni);
    }

  /* returns the smallest composite of 2, 3, 5, 7 and 11 which is >= n */
  static POCKETFFT_NOINLINE size_t good_size(size_t n)
    {
    if (n<=12) return n;
    // Exhaustively enumerate products of the supported radices and keep the
    // smallest one that is >= n.
    size_t bestfac=2*n;
    for (size_t f2=1; f2<bestfac; f2*=2)
      for (size_t f23=f2; f23<bestfac; f23*=3)
        for (size_t f235=f23; f235<bestfac; f235*=5)
          for (size_t f2357=f235; f2357<bestfac; f2357*=7)
            for (size_t f235711=f2357; f235711<bestfac; f235711*=11)
              if (f235711>=n) bestfac=f235711;
    return bestfac;
    }

  // Product of all extents in shape (total element count).
  static size_t prod(const shape_t &shape)
    {
    size_t res=1;
    for (auto sz: shape)
      res*=sz;
    return res;
    }

  // Validate shape/stride arguments common to all transforms; throws on
  // inconsistency.
  static POCKETFFT_NOINLINE void sanity_check(const shape_t &shape,
    const stride_t &stride_in, const stride_t &stride_out, bool inplace)
    {
    auto ndim = shape.size();
    if (ndim<1) throw runtime_error("ndim must be >= 1");
    if ((stride_in.size()!=ndim) || (stride_out.size()!=ndim))
      throw runtime_error("stride dimension mismatch");
    if (inplace && (stride_in!=stride_out))
      throw runtime_error("stride mismatch");
    }

  // As above, additionally checking that every axis index is valid and no
  // axis is listed twice.
  static POCKETFFT_NOINLINE void sanity_check(const shape_t &shape,
    const stride_t &stride_in, const stride_t &stride_out, bool inplace,
    const shape_t &axes)
    {
    sanity_check(shape, stride_in, stride_out, inplace);
    auto ndim = shape.size();
    shape_t tmp(ndim,0);
    for (auto ax : axes)
      {
      if (ax>=ndim) throw runtime_error("bad axis number");
      if (++tmp[ax]>1) throw runtime_error("axis specified repeatedly");
      }
    }

  // As above, for a single-axis transform.
  static POCKETFFT_NOINLINE void sanity_check(const shape_t &shape,
    const stride_t &stride_in, const stride_t &stride_out, bool inplace,
    size_t axis)
    {
    sanity_check(shape, stride_in, stride_out, inplace);
    if (axis>=shape.size()) throw runtime_error("bad axis number");
    }

#ifdef POCKETFFT_OPENMP
    static size_t nthreads() { return size_t(omp_get_num_threads()); }
    static size_t thread_num() { return size_t(omp_get_thread_num()); }
    // Decide how many threads to use: 1 for tiny problems, otherwise the
    // requested count (0 = OpenMP's maximum).
    static size_t thread_count (size_t nthreads, const shape_t &shape,
      size_t axis)
      {
      if (nthreads==1) return 1;
      if (prod(shape) < 20*shape[axis]) return 1; // too little work per thread
      return (nthreads==0) ? size_t(omp_get_max_threads()) : nthreads;
      }
#else
    static constexpr size_t nthreads() { return 1; }
    static constexpr size_t thread_num() { return 0; }
#endif
  };

//
// complex FFTPACK transforms
//

template<typename T0> class cfftp
  {
  private:
    // One factorization step: radix fct plus its twiddle tables (tw for the
    // per-element twiddles, tws only for the generic radix passg).
    struct fctdata
      {
      size_t fct;
      cmplx<T0> *tw, *tws;
      };

    size_t length;            // transform length
    arr<cmplx<T0>> mem;       // backing storage for all twiddle tables
    vector<fctdata> fact;     // factorization of length, in pass order

    void add_factor(size_t factor)
      { fact.push_back({factor, nullptr, nullptr}); }

    // Radix-2 butterfly pass; fwd selects forward/backward twiddling.
    template<bool fwd, typename T> void pass2 (size_t ido, size_t l1,
      const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const cmplx<T0> * POCKETFFT_RESTRICT wa)
      {
      constexpr size_t cdim=2;
      auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
        { return ch[a+ido*(b+l1*c)]; };
      auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
        { return cc[a+ido*(b+cdim*c)]; };
      auto WA = [wa, ido](size_t x, size_t i)
        { return wa[i-1+x*(ido-1)]; };

      if (ido==1)
        for (size_t k=0; k<l1; ++k)
          {
          CH(0,k,0) = CC(0,0,k)+CC(0,1,k);
          CH(0,k,1) = CC(0,0,k)-CC(0,1,k);
          }
      else
        for (size_t k=0; k<l1; ++k)
          {
          CH(0,k,0) = CC(0,0,k)+CC(0,1,k);
          CH(0,k,1) = CC(0,0,k)-CC(0,1,k);
          for (size_t i=1; i<ido; ++i)
            {
            CH(i,k,0) = CC(i,0,k)+CC(i,1,k);
            CH(i,k,1) = (CC(i,0,k)-CC(i,1,k)).template special_mul<fwd>(WA(0,i));
            }
          }
      }

// Shared prologue/butterfly helpers for the radix-3 pass.
#define POCKETFFT_PREP3(idx) \
        T t0 = CC(idx,0,k), t1, t2; \
        PMC (t1,t2,CC(idx,1,k),CC(idx,2,k)); \
        CH(idx,k,0)=t0+t1;
#define POCKETFFT_PARTSTEP3a(u1,u2,twr,twi) \
        { \
        T ca,cb; \
        ca=t0+t1*twr; \
        cb=t2*twi; ROT90(cb); \
        PMC(CH(0,k,u1),CH(0,k,u2),ca,cb) ;\
        }
#define POCKETFFT_PARTSTEP3b(u1,u2,twr,twi) \
        { \
        T ca,cb,da,db; \
        ca=t0+t1*twr; \
        cb=t2*twi; ROT90(cb); \
        PMC(da,db,ca,cb); \
        CH(i,k,u1) = da.template special_mul<fwd>(WA(u1-1,i)); \
        CH(i,k,u2) = db.template special_mul<fwd>(WA(u2-1,i)); \
        }
    // Radix-3 butterfly pass.
    template<bool fwd, typename T> void pass3 (size_t ido, size_t l1,
      const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const cmplx<T0> * POCKETFFT_RESTRICT wa)
      {
      constexpr size_t cdim=3;
      // tw1i flips sign for the backward transform.
      constexpr T0 tw1r=-0.5, tw1i= (fwd ?
-1: 1) * T0(0.8660254037844386467637231707529362L);

      auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
        { return ch[a+ido*(b+l1*c)]; };
      auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
        { return cc[a+ido*(b+cdim*c)]; };
      auto WA = [wa, ido](size_t x, size_t i)
        { return wa[i-1+x*(ido-1)]; };

      if (ido==1)
        for (size_t k=0; k<l1; ++k)
          {
          POCKETFFT_PREP3(0)
          POCKETFFT_PARTSTEP3a(1,2,tw1r,tw1i)
          }
      else
        for (size_t k=0; k<l1; ++k)
          {
          {
          POCKETFFT_PREP3(0)
          POCKETFFT_PARTSTEP3a(1,2,tw1r,tw1i)
          }
          for (size_t i=1; i<ido; ++i)
            {
            POCKETFFT_PREP3(i)
            POCKETFFT_PARTSTEP3b(1,2,tw1r,tw1i)
            }
          }
      }

#undef POCKETFFT_PARTSTEP3b
#undef POCKETFFT_PARTSTEP3a
#undef POCKETFFT_PREP3

    // Radix-4 butterfly pass; ROTX90 applies the +/-i rotation depending on
    // direction.
    template<bool fwd, typename T> void pass4 (size_t ido, size_t l1,
      const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const cmplx<T0> * POCKETFFT_RESTRICT wa)
      {
      constexpr size_t cdim=4;
      auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
        { return ch[a+ido*(b+l1*c)]; };
      auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
        { return cc[a+ido*(b+cdim*c)]; };
      auto WA = [wa, ido](size_t x, size_t i)
        { return wa[i-1+x*(ido-1)]; };

      if (ido==1)
        for (size_t k=0; k<l1; ++k)
          {
          T t1, t2, t3, t4;
          PMC(t2,t1,CC(0,0,k),CC(0,2,k));
          PMC(t3,t4,CC(0,1,k),CC(0,3,k));
          ROTX90<fwd>(t4);
          PMC(CH(0,k,0),CH(0,k,2),t2,t3);
          PMC(CH(0,k,1),CH(0,k,3),t1,t4);
          }
      else
        for (size_t k=0; k<l1; ++k)
          {
          {
          T t1, t2, t3, t4;
          PMC(t2,t1,CC(0,0,k),CC(0,2,k));
          PMC(t3,t4,CC(0,1,k),CC(0,3,k));
          ROTX90<fwd>(t4);
          PMC(CH(0,k,0),CH(0,k,2),t2,t3);
          PMC(CH(0,k,1),CH(0,k,3),t1,t4);
          }
          for (size_t i=1; i<ido; ++i)
            {
            T c2, c3, c4, t1, t2, t3, t4;
            T cc0=CC(i,0,k), cc1=CC(i,1,k),cc2=CC(i,2,k),cc3=CC(i,3,k);
            PMC(t2,t1,cc0,cc2);
            PMC(t3,t4,cc1,cc3);
            ROTX90<fwd>(t4);
            PMC(CH(i,k,0),c3,t2,t3);
            PMC(c2,c4,t1,t4);
            CH(i,k,1) = c2.template special_mul<fwd>(WA(0,i));
            CH(i,k,2) = c3.template special_mul<fwd>(WA(1,i));
            CH(i,k,3) = c4.template special_mul<fwd>(WA(2,i));
            }
          }
      }

// Radix-5 helpers. NOTE: in the PARTSTEP5 macros the twai/twbi arguments are
// passed *with* their sign (e.g. "+tw1i", "-tw2i"), which is what glues the
// apparently operator-less products "twai*t4.r twbi*t3.r" into a valid sum.
#define POCKETFFT_PREP5(idx) \
        T t0 = CC(idx,0,k), t1, t2, t3, t4; \
        PMC (t1,t4,CC(idx,1,k),CC(idx,4,k)); \
        PMC (t2,t3,CC(idx,2,k),CC(idx,3,k)); \
        CH(idx,k,0).r=t0.r+t1.r+t2.r; \
        CH(idx,k,0).i=t0.i+t1.i+t2.i;

#define POCKETFFT_PARTSTEP5a(u1,u2,twar,twbr,twai,twbi) \
        { \
        T ca,cb; \
        ca.r=t0.r+twar*t1.r+twbr*t2.r; \
        ca.i=t0.i+twar*t1.i+twbr*t2.i; \
        cb.i=twai*t4.r twbi*t3.r; \
        cb.r=-(twai*t4.i twbi*t3.i); \
        PMC(CH(0,k,u1),CH(0,k,u2),ca,cb); \
        }

#define POCKETFFT_PARTSTEP5b(u1,u2,twar,twbr,twai,twbi) \
        { \
        T ca,cb,da,db; \
        ca.r=t0.r+twar*t1.r+twbr*t2.r; \
        ca.i=t0.i+twar*t1.i+twbr*t2.i; \
        cb.i=twai*t4.r twbi*t3.r; \
        cb.r=-(twai*t4.i twbi*t3.i); \
        PMC(da,db,ca,cb); \
        CH(i,k,u1) = da.template special_mul<fwd>(WA(u1-1,i)); \
        CH(i,k,u2) = db.template special_mul<fwd>(WA(u2-1,i)); \
        }
    // Radix-5 butterfly pass.
    template<bool fwd, typename T> void pass5 (size_t ido, size_t l1,
      const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const cmplx<T0> * POCKETFFT_RESTRICT wa)
      {
      constexpr size_t cdim=5;
      // Fifth roots of unity; imaginary parts flip sign for the backward
      // transform.
      constexpr T0 tw1r= T0(0.3090169943749474241022934171828191L),
                   tw1i= (fwd ? -1: 1) * T0(0.9510565162951535721164393333793821L),
                   tw2r= T0(-0.8090169943749474241022934171828191L),
                   tw2i= (fwd ?
-1: 1) * T0(0.5877852522924731291687059546390728L);

      auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
        { return ch[a+ido*(b+l1*c)]; };
      auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
        { return cc[a+ido*(b+cdim*c)]; };
      auto WA = [wa, ido](size_t x, size_t i)
        { return wa[i-1+x*(ido-1)]; };

      if (ido==1)
        for (size_t k=0; k<l1; ++k)
          {
          POCKETFFT_PREP5(0)
          POCKETFFT_PARTSTEP5a(1,4,tw1r,tw2r,+tw1i,+tw2i)
          POCKETFFT_PARTSTEP5a(2,3,tw2r,tw1r,+tw2i,-tw1i)
          }
      else
        for (size_t k=0; k<l1; ++k)
          {
          {
          POCKETFFT_PREP5(0)
          POCKETFFT_PARTSTEP5a(1,4,tw1r,tw2r,+tw1i,+tw2i)
          POCKETFFT_PARTSTEP5a(2,3,tw2r,tw1r,+tw2i,-tw1i)
          }
          for (size_t i=1; i<ido; ++i)
            {
            POCKETFFT_PREP5(i)
            POCKETFFT_PARTSTEP5b(1,4,tw1r,tw2r,+tw1i,+tw2i)
            POCKETFFT_PARTSTEP5b(2,3,tw2r,tw1r,+tw2i,-tw1i)
            }
          }
      }

#undef POCKETFFT_PARTSTEP5b
#undef POCKETFFT_PARTSTEP5a
#undef POCKETFFT_PREP5

// Radix-7 helpers; as with radix 5, the y* arguments carry their own sign.
#define POCKETFFT_PREP7(idx) \
        T t1 = CC(idx,0,k), t2, t3, t4, t5, t6, t7; \
        PMC (t2,t7,CC(idx,1,k),CC(idx,6,k)); \
        PMC (t3,t6,CC(idx,2,k),CC(idx,5,k)); \
        PMC (t4,t5,CC(idx,3,k),CC(idx,4,k)); \
        CH(idx,k,0).r=t1.r+t2.r+t3.r+t4.r; \
        CH(idx,k,0).i=t1.i+t2.i+t3.i+t4.i;

#define POCKETFFT_PARTSTEP7a0(u1,u2,x1,x2,x3,y1,y2,y3,out1,out2) \
        { \
        T ca,cb; \
        ca.r=t1.r+x1*t2.r+x2*t3.r+x3*t4.r; \
        ca.i=t1.i+x1*t2.i+x2*t3.i+x3*t4.i; \
        cb.i=y1*t7.r y2*t6.r y3*t5.r; \
        cb.r=-(y1*t7.i y2*t6.i y3*t5.i); \
        PMC(out1,out2,ca,cb); \
        }
#define POCKETFFT_PARTSTEP7a(u1,u2,x1,x2,x3,y1,y2,y3) \
        POCKETFFT_PARTSTEP7a0(u1,u2,x1,x2,x3,y1,y2,y3,CH(0,k,u1),CH(0,k,u2))
#define POCKETFFT_PARTSTEP7(u1,u2,x1,x2,x3,y1,y2,y3) \
        { \
        T da,db; \
        POCKETFFT_PARTSTEP7a0(u1,u2,x1,x2,x3,y1,y2,y3,da,db) \
        CH(i,k,u1) = da.template special_mul<fwd>(WA(u1-1,i)); \
        CH(i,k,u2) = db.template special_mul<fwd>(WA(u2-1,i)); \
        }

    // Radix-7 butterfly pass.
    template<bool fwd, typename T> void pass7(size_t ido, size_t l1,
      const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const cmplx<T0> * POCKETFFT_RESTRICT wa)
      {
      constexpr size_t cdim=7;
      // Seventh roots of unity; imaginary parts flip sign for backward.
      constexpr T0 tw1r= T0(0.6234898018587335305250048840042398L),
                   tw1i= (fwd ? -1 : 1) * T0(0.7818314824680298087084445266740578L),
                   tw2r= T0(-0.2225209339563144042889025644967948L),
                   tw2i= (fwd ? -1 : 1) * T0(0.9749279121818236070181316829939312L),
                   tw3r= T0(-0.9009688679024191262361023195074451L),
                   tw3i= (fwd ? -1 : 1) * T0(0.433883739117558120475768332848359L);

      auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
        { return ch[a+ido*(b+l1*c)]; };
      auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
        { return cc[a+ido*(b+cdim*c)]; };
      auto WA = [wa, ido](size_t x, size_t i)
        { return wa[i-1+x*(ido-1)]; };

      if (ido==1)
        for (size_t k=0; k<l1; ++k)
          {
          POCKETFFT_PREP7(0)
          POCKETFFT_PARTSTEP7a(1,6,tw1r,tw2r,tw3r,+tw1i,+tw2i,+tw3i)
          POCKETFFT_PARTSTEP7a(2,5,tw2r,tw3r,tw1r,+tw2i,-tw3i,-tw1i)
          POCKETFFT_PARTSTEP7a(3,4,tw3r,tw1r,tw2r,+tw3i,-tw1i,+tw2i)
          }
      else
        for (size_t k=0; k<l1; ++k)
          {
          {
          POCKETFFT_PREP7(0)
          POCKETFFT_PARTSTEP7a(1,6,tw1r,tw2r,tw3r,+tw1i,+tw2i,+tw3i)
          POCKETFFT_PARTSTEP7a(2,5,tw2r,tw3r,tw1r,+tw2i,-tw3i,-tw1i)
          POCKETFFT_PARTSTEP7a(3,4,tw3r,tw1r,tw2r,+tw3i,-tw1i,+tw2i)
          }
          for (size_t i=1; i<ido; ++i)
            {
            POCKETFFT_PREP7(i)
            POCKETFFT_PARTSTEP7(1,6,tw1r,tw2r,tw3r,+tw1i,+tw2i,+tw3i)
            POCKETFFT_PARTSTEP7(2,5,tw2r,tw3r,tw1r,+tw2i,-tw3i,-tw1i)
            POCKETFFT_PARTSTEP7(3,4,tw3r,tw1r,tw2r,+tw3i,-tw1i,+tw2i)
            }
          }
      }

#undef POCKETFFT_PARTSTEP7
#undef POCKETFFT_PARTSTEP7a0
#undef POCKETFFT_PARTSTEP7a
#undef POCKETFFT_PREP7

    // In-place multiplication by exp(-/+ i*pi/4) (45-degree rotation).
    template <bool fwd, typename T> void ROTX45(T &a)
      {
      constexpr T0 hsqt2=T0(0.707106781186547524400844362104849L);
      if (fwd)
        { auto tmp_=a.r; a.r=hsqt2*(a.r+a.i); a.i=hsqt2*(a.i-tmp_); }
      else
        { auto tmp_=a.r; a.r=hsqt2*(a.r-a.i); a.i=hsqt2*(a.i+tmp_); }
      }
    // In-place multiplication by exp(-/+ i*3*pi/4) (135-degree rotation).
    template <bool fwd, typename T> void ROTX135(T &a)
      {
      constexpr T0 hsqt2=T0(0.707106781186547524400844362104849L);
      if (fwd)
        { auto tmp_=a.r; a.r=hsqt2*(a.i-a.r); a.i=hsqt2*(-tmp_-a.i); }
      else
        { auto tmp_=a.r; a.r=hsqt2*(-a.r-a.i); a.i=hsqt2*(tmp_-a.i); }
      }
    // In-place butterfly: a <- a+b, b <- a_old-b.
    template<typename T> inline void PMINPLACE(T &a, T &b)
      { T t = a; a.r+=b.r; a.i+=b.i; b.r=t.r-b.r; b.i=t.i-b.i; }
    // Radix-8 butterfly pass, built from radix-2 steps plus fixed rotations
    // by 45/90/135 degrees.
    template<bool fwd, typename T> void pass8 (size_t ido, size_t l1,
      const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const cmplx<T0> * POCKETFFT_RESTRICT wa)
      {
      constexpr size_t cdim=8;
      auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
        { return ch[a+ido*(b+l1*c)]; };
      auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
        { return cc[a+ido*(b+cdim*c)]; };
      auto WA = [wa, ido](size_t x, size_t i)
        { return wa[i-1+x*(ido-1)]; };

      if (ido==1)
        for (size_t k=0; k<l1; ++k)
          {
          T a0, a1, a2, a3, a4, a5, a6, a7;
          PMC(a0,a4,CC(0,0,k),CC(0,4,k));
          PMC(a1,a5,CC(0,1,k),CC(0,5,k));
          PMC(a2,a6,CC(0,2,k),CC(0,6,k));
          PMC(a3,a7,CC(0,3,k),CC(0,7,k));
          ROTX90<fwd>(a6);
          ROTX90<fwd>(a7);
          PMINPLACE(a0,a2);
          PMINPLACE(a1,a3);
          PMINPLACE(a4,a6);
          PMINPLACE(a5,a7);
          ROTX45<fwd>(a5);
          ROTX90<fwd>(a3);
          ROTX135<fwd>(a7);
          PMC(CH(0,k,0),CH(0,k,4),a0,a1);
          PMC(CH(0,k,1),CH(0,k,5),a4,a5);
          PMC(CH(0,k,2),CH(0,k,6),a2,a3);
          PMC(CH(0,k,3),CH(0,k,7),a6,a7);
          }
      else
        for (size_t k=0; k<l1; ++k)
          {
          // i==0 column needs no twiddling.
          T a0, a1, a2, a3, a4, a5, a6, a7;
          PMC(a0,a4,CC(0,0,k),CC(0,4,k));
          PMC(a1,a5,CC(0,1,k),CC(0,5,k));
          PMC(a2,a6,CC(0,2,k),CC(0,6,k));
          PMC(a3,a7,CC(0,3,k),CC(0,7,k));
          ROTX90<fwd>(a6);
          ROTX90<fwd>(a7);
          PMINPLACE(a0,a2);
          PMINPLACE(a1,a3);
          PMINPLACE(a4,a6);
          PMINPLACE(a5,a7);
          ROTX45<fwd>(a5);
          ROTX90<fwd>(a3);
          ROTX135<fwd>(a7);
          PMC(CH(0,k,0),CH(0,k,4),a0,a1);
          PMC(CH(0,k,1),CH(0,k,5),a4,a5);
          PMC(CH(0,k,2),CH(0,k,6),a2,a3);
          PMC(CH(0,k,3),CH(0,k,7),a6,a7);
          for (size_t i=1; i<ido; ++i)
            {
            T a0, a1, a2, a3, a4, a5, a6, a7;
            PMC(a0,a4,CC(i,0,k),CC(i,4,k));
            PMC(a1,a5,CC(i,1,k),CC(i,5,k));
            PMC(a2,a6,CC(i,2,k),CC(i,6,k));
            PMC(a3,a7,CC(i,3,k),CC(i,7,k));
            ROTX90<fwd>(a6);
            ROTX90<fwd>(a7);
            PMINPLACE(a0,a2);
            PMINPLACE(a1,a3);
            PMINPLACE(a4,a6);
            PMINPLACE(a5,a7);
            ROTX45<fwd>(a5);
            ROTX90<fwd>(a3);
            ROTX135<fwd>(a7);
            PMINPLACE(a0,a1);
            PMINPLACE(a2,a3);
            PMINPLACE(a4,a5);
            PMINPLACE(a6,a7);
            CH(i,k,0) = a0;
            CH(i,k,1) = a4.template special_mul<fwd>(WA(0,i));
            CH(i,k,2) = a2.template special_mul<fwd>(WA(1,i));
            CH(i,k,3) = a6.template special_mul<fwd>(WA(2,i));
            CH(i,k,4) = a1.template special_mul<fwd>(WA(3,i));
            CH(i,k,5) = a5.template special_mul<fwd>(WA(4,i));
            CH(i,k,6) = a3.template special_mul<fwd>(WA(5,i));
            CH(i,k,7) = a7.template special_mul<fwd>(WA(6,i));
            }
          }
      }

// Radix-11 helpers; as with radix 5/7, the y* arguments carry their sign.
#define POCKETFFT_PREP11(idx) \
        T t1 = CC(idx,0,k), t2, t3, t4, t5, t6, t7, t8, t9, t10, t11; \
        PMC (t2,t11,CC(idx,1,k),CC(idx,10,k)); \
        PMC (t3,t10,CC(idx,2,k),CC(idx, 9,k)); \
        PMC (t4,t9 ,CC(idx,3,k),CC(idx, 8,k)); \
        PMC (t5,t8 ,CC(idx,4,k),CC(idx, 7,k)); \
        PMC (t6,t7 ,CC(idx,5,k),CC(idx, 6,k)); \
        CH(idx,k,0).r=t1.r+t2.r+t3.r+t4.r+t5.r+t6.r; \
        CH(idx,k,0).i=t1.i+t2.i+t3.i+t4.i+t5.i+t6.i;

#define POCKETFFT_PARTSTEP11a0(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5,out1,out2) \
        { \
        T ca = t1 + t2*x1 + t3*x2 + t4*x3 + t5*x4 +t6*x5, \
          cb; \
        cb.i=y1*t11.r y2*t10.r y3*t9.r y4*t8.r y5*t7.r; \
        cb.r=-(y1*t11.i y2*t10.i y3*t9.i y4*t8.i y5*t7.i ); \
        PMC(out1,out2,ca,cb); \
        }
#define POCKETFFT_PARTSTEP11a(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5) \
        POCKETFFT_PARTSTEP11a0(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5,CH(0,k,u1),CH(0,k,u2))
#define POCKETFFT_PARTSTEP11(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5) \
        { \
        T da,db; \
        POCKETFFT_PARTSTEP11a0(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5,da,db) \
        CH(i,k,u1) = da.template special_mul<fwd>(WA(u1-1,i)); \
        CH(i,k,u2) = db.template special_mul<fwd>(WA(u2-1,i)); \
        }

    // Radix-11 butterfly pass.
    template<bool fwd, typename T> void pass11 (size_t ido, size_t l1,
      const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const cmplx<T0> * POCKETFFT_RESTRICT wa)
      {
      constexpr size_t cdim=11;
      // Eleventh roots of unity; imaginary parts flip sign for backward.
      constexpr T0 tw1r= T0(0.8412535328311811688618116489193677L),
                   tw1i= (fwd ? -1 : 1) * T0(0.5406408174555975821076359543186917L),
                   tw2r= T0(0.4154150130018864255292741492296232L),
                   tw2i= (fwd ? -1 : 1) * T0(0.9096319953545183714117153830790285L),
                   tw3r= T0(-0.1423148382732851404437926686163697L),
                   tw3i= (fwd ? -1 : 1) * T0(0.9898214418809327323760920377767188L),
                   tw4r= T0(-0.6548607339452850640569250724662936L),
                   tw4i= (fwd ?
-1 : 1) * T0(0.7557495743542582837740358439723444L), tw5r= T0(-0.9594929736144973898903680570663277L), tw5i= (fwd ? -1 : 1) * T0(0.2817325568414296977114179153466169L); auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T& { return ch[a+ido*(b+l1*c)]; }; auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T& { return cc[a+ido*(b+cdim*c)]; }; auto WA = [wa, ido](size_t x, size_t i) { return wa[i-1+x*(ido-1)]; }; if (ido==1) for (size_t k=0; k<l1; ++k) { POCKETFFT_PREP11(0) POCKETFFT_PARTSTEP11a(1,10,tw1r,tw2r,tw3r,tw4r,tw5r,+tw1i,+tw2i,+tw3i,+tw4i,+tw5i) POCKETFFT_PARTSTEP11a(2, 9,tw2r,tw4r,tw5r,tw3r,tw1r,+tw2i,+tw4i,-tw5i,-tw3i,-tw1i) POCKETFFT_PARTSTEP11a(3, 8,tw3r,tw5r,tw2r,tw1r,tw4r,+tw3i,-tw5i,-tw2i,+tw1i,+tw4i) POCKETFFT_PARTSTEP11a(4, 7,tw4r,tw3r,tw1r,tw5r,tw2r,+tw4i,-tw3i,+tw1i,+tw5i,-tw2i) POCKETFFT_PARTSTEP11a(5, 6,tw5r,tw1r,tw4r,tw2r,tw3r,+tw5i,-tw1i,+tw4i,-tw2i,+tw3i) } else for (size_t k=0; k<l1; ++k) { { POCKETFFT_PREP11(0) POCKETFFT_PARTSTEP11a(1,10,tw1r,tw2r,tw3r,tw4r,tw5r,+tw1i,+tw2i,+tw3i,+tw4i,+tw5i) POCKETFFT_PARTSTEP11a(2, 9,tw2r,tw4r,tw5r,tw3r,tw1r,+tw2i,+tw4i,-tw5i,-tw3i,-tw1i) POCKETFFT_PARTSTEP11a(3, 8,tw3r,tw5r,tw2r,tw1r,tw4r,+tw3i,-tw5i,-tw2i,+tw1i,+tw4i) POCKETFFT_PARTSTEP11a(4, 7,tw4r,tw3r,tw1r,tw5r,tw2r,+tw4i,-tw3i,+tw1i,+tw5i,-tw2i) POCKETFFT_PARTSTEP11a(5, 6,tw5r,tw1r,tw4r,tw2r,tw3r,+tw5i,-tw1i,+tw4i,-tw2i,+tw3i) } for (size_t i=1; i<ido; ++i) { POCKETFFT_PREP11(i) POCKETFFT_PARTSTEP11(1,10,tw1r,tw2r,tw3r,tw4r,tw5r,+tw1i,+tw2i,+tw3i,+tw4i,+tw5i) POCKETFFT_PARTSTEP11(2, 9,tw2r,tw4r,tw5r,tw3r,tw1r,+tw2i,+tw4i,-tw5i,-tw3i,-tw1i) POCKETFFT_PARTSTEP11(3, 8,tw3r,tw5r,tw2r,tw1r,tw4r,+tw3i,-tw5i,-tw2i,+tw1i,+tw4i) POCKETFFT_PARTSTEP11(4, 7,tw4r,tw3r,tw1r,tw5r,tw2r,+tw4i,-tw3i,+tw1i,+tw5i,-tw2i) POCKETFFT_PARTSTEP11(5, 6,tw5r,tw1r,tw4r,tw2r,tw3r,+tw5i,-tw1i,+tw4i,-tw2i,+tw3i) } } } #undef PARTSTEP11 #undef PARTSTEP11a0 #undef PARTSTEP11a #undef POCKETFFT_PREP11 template<bool fwd, typename T> void passg (size_t ido, size_t ip, 
      size_t l1, T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const cmplx<T0> * POCKETFFT_RESTRICT wa,
      const cmplx<T0> * POCKETFFT_RESTRICT csarr)
      {
      const size_t cdim=ip;
      size_t ipph = (ip+1)/2;
      size_t idl1 = ido*l1;

      auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
        { return ch[a+ido*(b+l1*c)]; };
      auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
        { return cc[a+ido*(b+cdim*c)]; };
      auto CX = [cc, ido, l1](size_t a, size_t b, size_t c) -> T&
        { return cc[a+ido*(b+l1*c)]; };
      auto CX2 = [cc, idl1](size_t a, size_t b) -> T&
        { return cc[a+idl1*b]; };
      auto CH2 = [ch, idl1](size_t a, size_t b) -> const T&
        { return ch[a+idl1*b]; };

      // Direction-adjusted roots of unity for this radix.
      arr<cmplx<T0>> wal(ip);
      wal[0] = cmplx<T0>(1., 0.);
      for (size_t i=1; i<ip; ++i)
        wal[i]=cmplx<T0>(csarr[i].r,fwd ? -csarr[i].i : csarr[i].i);

      for (size_t k=0; k<l1; ++k)
        for (size_t i=0; i<ido; ++i)
          CH(i,k,0) = CC(i,0,k);
      // Symmetric/antisymmetric combinations of mirrored input columns.
      for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc)
        for (size_t k=0; k<l1; ++k)
          for (size_t i=0; i<ido; ++i)
            PMC(CH(i,k,j),CH(i,k,jc),CC(i,j,k),CC(i,jc,k));
      // DC output: plain sum over all columns.
      for (size_t k=0; k<l1; ++k)
        for (size_t i=0; i<ido; ++i)
          {
          T tmp = CH(i,k,0);
          for (size_t j=1; j<ipph; ++j)
            tmp+=CH(i,k,j);
          CX(i,k,0) = tmp;
          }
      // Accumulate the remaining outputs, unrolling the inner recurrence
      // two twiddle steps at a time; iwal tracks the root index mod ip.
      for (size_t l=1, lc=ip-1; l<ipph; ++l, --lc)
        {
        // j=0
        for (size_t ik=0; ik<idl1; ++ik)
          {
          CX2(ik,l).r = CH2(ik,0).r+wal[l].r*CH2(ik,1).r+wal[2*l].r*CH2(ik,2).r;
          CX2(ik,l).i = CH2(ik,0).i+wal[l].r*CH2(ik,1).i+wal[2*l].r*CH2(ik,2).i;
          CX2(ik,lc).r=-wal[l].i*CH2(ik,ip-1).i-wal[2*l].i*CH2(ik,ip-2).i;
          CX2(ik,lc).i=wal[l].i*CH2(ik,ip-1).r+wal[2*l].i*CH2(ik,ip-2).r;
          }
        size_t iwal=2*l;
        size_t j=3, jc=ip-3;
        for (; j<ipph-1; j+=2, jc-=2)
          {
          iwal+=l; if (iwal>ip) iwal-=ip;
          cmplx<T0> xwal=wal[iwal];
          iwal+=l; if (iwal>ip) iwal-=ip;
          cmplx<T0> xwal2=wal[iwal];
          for (size_t ik=0; ik<idl1; ++ik)
            {
            CX2(ik,l).r += CH2(ik,j).r*xwal.r+CH2(ik,j+1).r*xwal2.r;
            CX2(ik,l).i += CH2(ik,j).i*xwal.r+CH2(ik,j+1).i*xwal2.r;
            CX2(ik,lc).r -= CH2(ik,jc).i*xwal.i+CH2(ik,jc-1).i*xwal2.i;
            CX2(ik,lc).i += CH2(ik,jc).r*xwal.i+CH2(ik,jc-1).r*xwal2.i;
            }
          }
        // Remainder (at most one odd step).
        for (; j<ipph; ++j, --jc)
          {
          iwal+=l; if (iwal>ip) iwal-=ip;
          cmplx<T0> xwal=wal[iwal];
          for (size_t ik=0; ik<idl1; ++ik)
            {
            CX2(ik,l).r += CH2(ik,j).r*xwal.r;
            CX2(ik,l).i += CH2(ik,j).i*xwal.r;
            CX2(ik,lc).r -= CH2(ik,jc).i*xwal.i;
            CX2(ik,lc).i += CH2(ik,jc).r*xwal.i;
            }
          }
        }

      // shuffling and twiddling
      if (ido==1)
        for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc)
          for (size_t ik=0; ik<idl1; ++ik)
            {
            T t1=CX2(ik,j), t2=CX2(ik,jc);
            PMC(CX2(ik,j),CX2(ik,jc),t1,t2);
            }
      else
        {
        for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc)
          for (size_t k=0; k<l1; ++k)
            {
            T t1=CX(0,k,j), t2=CX(0,k,jc);
            PMC(CX(0,k,j),CX(0,k,jc),t1,t2);
            for (size_t i=1; i<ido; ++i)
              {
              T x1, x2;
              PMC(x1,x2,CX(i,k,j),CX(i,k,jc));
              size_t idij=(j-1)*(ido-1)+i-1;
              CX(i,k,j) = x1.template special_mul<fwd>(wa[idij]);
              idij=(jc-1)*(ido-1)+i-1;
              CX(i,k,jc) = x2.template special_mul<fwd>(wa[idij]);
              }
            }
        }
      }

    // Run all factorization passes over c, ping-ponging between c and a
    // scratch buffer; fct is the final normalization factor.
    template<bool fwd, typename T> void pass_all(T c[], T0 fct)
      {
      if (length==1) { c[0]*=fct; return; }
      size_t l1=1;
      arr<T> ch(length);
      T *p1=c, *p2=ch.data();

      for(size_t k1=0; k1<fact.size(); k1++)
        {
        size_t ip=fact[k1].fct;
        size_t l2=ip*l1;
        size_t ido = length/l2;
        if (ip==4) pass4<fwd> (ido, l1, p1, p2, fact[k1].tw);
        else if(ip==8) pass8<fwd>(ido, l1, p1, p2, fact[k1].tw);
        else if(ip==2) pass2<fwd>(ido, l1, p1, p2, fact[k1].tw);
        else if(ip==3) pass3<fwd> (ido, l1, p1, p2, fact[k1].tw);
        else if(ip==5) pass5<fwd> (ido, l1, p1, p2, fact[k1].tw);
        else if(ip==7) pass7<fwd> (ido, l1, p1, p2, fact[k1].tw);
        else if(ip==11) pass11<fwd> (ido, l1, p1, p2, fact[k1].tw);
        else
          {
          // passg leaves its result in p1, so swap twice (net no-op).
          passg<fwd>(ido, ip, l1, p1, p2, fact[k1].tw, fact[k1].tws);
          swap(p1,p2);
          }
        swap(p1,p2);
        l1=l2;
        }
      // Result may have ended up in the scratch buffer; normalize while
      // copying back, or copy verbatim when fct==1.
      if (p1!=c)
        {
        if (fct!=1.)
          for (size_t i=0; i<length; ++i)
            c[i] = ch[i]*fct;
        else
          memcpy (c,p1,length*sizeof(T));
        }
      else
        if (fct!=1.)
for (size_t i=0; i<length; ++i) c[i] *= fct; } public: template<typename T> void forward(T c[], T0 fct) { pass_all<true>(c, fct); } template<typename T> void backward(T c[], T0 fct) { pass_all<false>(c, fct); } private: POCKETFFT_NOINLINE void factorize() { size_t len=length; while ((len&7)==0) { add_factor(8); len>>=3; } while ((len&3)==0) { add_factor(4); len>>=2; } if ((len&1)==0) { len>>=1; // factor 2 should be at the front of the factor list add_factor(2); swap(fact[0].fct, fact.back().fct); } for (size_t divisor=3; divisor*divisor<=len; divisor+=2) while ((len%divisor)==0) { add_factor(divisor); len/=divisor; } if (len>1) add_factor(len); } size_t twsize() const { size_t twsize=0, l1=1; for (size_t k=0; k<fact.size(); ++k) { size_t ip=fact[k].fct, ido= length/(l1*ip); twsize+=(ip-1)*(ido-1); if (ip>11) twsize+=ip; l1*=ip; } return twsize; } void comp_twiddle() { sincos_2pibyn<T0> twid(length, false); auto twiddle = twid.cdata(); size_t l1=1; size_t memofs=0; for (size_t k=0; k<fact.size(); ++k) { size_t ip=fact[k].fct, ido=length/(l1*ip); fact[k].tw=mem.data()+memofs; memofs+=(ip-1)*(ido-1); for (size_t j=1; j<ip; ++j) for (size_t i=1; i<ido; ++i) fact[k].tw[(j-1)*(ido-1)+i-1] = twiddle[j*l1*i]; if (ip>11) { fact[k].tws=mem.data()+memofs; memofs+=ip; for (size_t j=0; j<ip; ++j) fact[k].tws[j] = twiddle[j*l1*ido]; } l1*=ip; } } public: POCKETFFT_NOINLINE cfftp(size_t length_) : length(length_) { if (length==0) throw runtime_error("zero length FFT requested"); if (length==1) return; factorize(); mem.resize(twsize()); comp_twiddle(); } }; // // real-valued FFTPACK transforms // template<typename T0> class rfftp { private: struct fctdata { size_t fct; T0 *tw, *tws; }; size_t length; arr<T0> mem; vector<fctdata> fact; void add_factor(size_t factor) { fact.push_back({factor, nullptr, nullptr}); } template<typename T> inline void PM(T &a, T &b, T c, T d) { a=c+d; b=c-d; } /* (a+ib) = conj(c+id) * (e+if) */ template<typename T1, typename T2, typename T3> inline void 
    MULPM (T1 &a, T1 &b, T2 c, T2 d, T3 e, T3 f)
      { a=c*e+d*f; b=c*f-d*e; }

    // Real-input radix-2 forward pass.
    template<typename T> void radf2 (size_t ido, size_t l1,
      const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const T0 * POCKETFFT_RESTRICT wa)
      {
      constexpr size_t cdim=2;
      auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
      auto CC = [cc,ido,l1](size_t a, size_t b, size_t c) -> const T&
        { return cc[a+ido*(b+l1*c)]; };
      auto CH = [ch,ido,cdim](size_t a, size_t b, size_t c) -> T&
        { return ch[a+ido*(b+cdim*c)]; };

      for (size_t k=0; k<l1; k++)
        PM (CH(0,0,k),CH(ido-1,1,k),CC(0,k,0),CC(0,k,1));
      if ((ido&1)==0)  // even ido: Nyquist element handled separately
        for (size_t k=0; k<l1; k++)
          {
          CH(    0,1,k) = -CC(ido-1,k,1);
          CH(ido-1,0,k) =  CC(ido-1,k,0);
          }
      if (ido<=2) return;
      for (size_t k=0; k<l1; k++)
        for (size_t i=2; i<ido; i+=2)
          {
          size_t ic=ido-i;
          T tr2, ti2;
          MULPM (tr2,ti2,WA(0,i-2),WA(0,i-1),CC(i-1,k,1),CC(i,k,1));
          PM (CH(i-1,0,k),CH(ic-1,1,k),CC(i-1,k,0),tr2);
          PM (CH(i  ,0,k),CH(ic  ,1,k),ti2,CC(i  ,k,0));
          }
      }

// a2=a+b; b2=i*(b-a);
#define POCKETFFT_REARRANGE(rx, ix, ry, iy) \
  {\
  auto t1=rx+ry, t2=ry-rx, t3=ix+iy, t4=ix-iy; \
  rx=t1; ix=t3; ry=t4; iy=t2; \
  }

    // Real-input radix-3 forward pass.
    template<typename T> void radf3(size_t ido, size_t l1,
      const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const T0 * POCKETFFT_RESTRICT wa)
      {
      constexpr size_t cdim=3;
      constexpr T0 taur=-0.5, taui=T0(0.8660254037844386467637231707529362L);
      auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
      auto CC = [cc,ido,l1](size_t a, size_t b, size_t c) -> const T&
        { return cc[a+ido*(b+l1*c)]; };
      auto CH = [ch,ido,cdim](size_t a, size_t b, size_t c) -> T&
        { return ch[a+ido*(b+cdim*c)]; };

      for (size_t k=0; k<l1; k++)
        {
        T cr2=CC(0,k,1)+CC(0,k,2);
        CH(0,0,k) = CC(0,k,0)+cr2;
        CH(0,2,k) = taui*(CC(0,k,2)-CC(0,k,1));
        CH(ido-1,1,k) = CC(0,k,0)+taur*cr2;
        }
      if (ido==1) return;
      for (size_t k=0; k<l1; k++)
        for (size_t i=2; i<ido; i+=2)
          {
          size_t ic=ido-i;
          T di2, di3, dr2, dr3;
          MULPM (dr2,di2,WA(0,i-2),WA(0,i-1),CC(i-1,k,1),CC(i,k,1)); // d2=conj(WA0)*CC1
          MULPM (dr3,di3,WA(1,i-2),WA(1,i-1),CC(i-1,k,2),CC(i,k,2)); // d3=conj(WA1)*CC2
          POCKETFFT_REARRANGE(dr2, di2, dr3, di3);
          CH(i-1,0,k) = CC(i-1,k,0)+dr2; // c add
          CH(i  ,0,k) = CC(i  ,k,0)+di2;
          T tr2 = CC(i-1,k,0)+taur*dr2; // c add
          T ti2 = CC(i  ,k,0)+taur*di2;
          T tr3 = taui*dr3;  // t3 = taui*i*(d3-d2)?
          T ti3 = taui*di3;
          PM(CH(i-1,2,k),CH(ic-1,1,k),tr2,tr3); // PM(i) = t2+t3
          PM(CH(i  ,2,k),CH(ic  ,1,k),ti3,ti2); // PM(ic) = conj(t2-t3)
          }
      }

    // Real-input radix-4 forward pass.
    template<typename T> void radf4(size_t ido, size_t l1,
      const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const T0 * POCKETFFT_RESTRICT wa)
      {
      constexpr size_t cdim=4;
      constexpr T0 hsqt2=T0(0.707106781186547524400844362104849L);
      auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
      auto CC = [cc,ido,l1](size_t a, size_t b, size_t c) -> const T&
        { return cc[a+ido*(b+l1*c)]; };
      auto CH = [ch,ido,cdim](size_t a, size_t b, size_t c) -> T&
        { return ch[a+ido*(b+cdim*c)]; };

      for (size_t k=0; k<l1; k++)
        {
        T tr1,tr2;
        PM (tr1,CH(0,2,k),CC(0,k,3),CC(0,k,1));
        PM (tr2,CH(ido-1,1,k),CC(0,k,0),CC(0,k,2));
        PM (CH(0,0,k),CH(ido-1,3,k),tr2,tr1);
        }
      if ((ido&1)==0)  // even ido: Nyquist element handled separately
        for (size_t k=0; k<l1; k++)
          {
          T ti1=-hsqt2*(CC(ido-1,k,1)+CC(ido-1,k,3));
          T tr1= hsqt2*(CC(ido-1,k,1)-CC(ido-1,k,3));
          PM (CH(ido-1,0,k),CH(ido-1,2,k),CC(ido-1,k,0),tr1);
          PM (CH(    0,3,k),CH(    0,1,k),ti1,CC(ido-1,k,2));
          }
      if (ido<=2) return;
      for (size_t k=0; k<l1; k++)
        for (size_t i=2; i<ido; i+=2)
          {
          size_t ic=ido-i;
          T ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4;
          MULPM(cr2,ci2,WA(0,i-2),WA(0,i-1),CC(i-1,k,1),CC(i,k,1));
          MULPM(cr3,ci3,WA(1,i-2),WA(1,i-1),CC(i-1,k,2),CC(i,k,2));
          MULPM(cr4,ci4,WA(2,i-2),WA(2,i-1),CC(i-1,k,3),CC(i,k,3));
          PM(tr1,tr4,cr4,cr2);
          PM(ti1,ti4,ci2,ci4);
          PM(tr2,tr3,CC(i-1,k,0),cr3);
          PM(ti2,ti3,CC(i  ,k,0),ci3);
          PM(CH(i-1,0,k),CH(ic-1,3,k),tr2,tr1);
          PM(CH(i  ,0,k),CH(ic  ,3,k),ti1,ti2);
          PM(CH(i-1,2,k),CH(ic-1,1,k),tr3,ti4);
          PM(CH(i  ,2,k),CH(ic  ,1,k),tr4,ti3);
          }
      }

    // Real-input radix-5 forward pass; signature continues below.
    template<typename T> void radf5(size_t ido, size_t l1, const T *
      POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const T0 * POCKETFFT_RESTRICT wa)
      {
      constexpr size_t cdim=5;
      // cos/sin of 2*pi/5 and 4*pi/5.
      constexpr T0 tr11= T0(0.3090169943749474241022934171828191L),
                   ti11= T0(0.9510565162951535721164393333793821L),
                   tr12= T0(-0.8090169943749474241022934171828191L),
                   ti12= T0(0.5877852522924731291687059546390728L);
      auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
      auto CC = [cc,ido,l1](size_t a, size_t b, size_t c) -> const T&
        { return cc[a+ido*(b+l1*c)]; };
      auto CH = [ch,ido,cdim](size_t a, size_t b, size_t c) -> T&
        { return ch[a+ido*(b+cdim*c)]; };

      for (size_t k=0; k<l1; k++)
        {
        T cr2, cr3, ci4, ci5;
        PM (cr2,ci5,CC(0,k,4),CC(0,k,1));
        PM (cr3,ci4,CC(0,k,3),CC(0,k,2));
        CH(0,0,k)=CC(0,k,0)+cr2+cr3;
        CH(ido-1,1,k)=CC(0,k,0)+tr11*cr2+tr12*cr3;
        CH(0,2,k)=ti11*ci5+ti12*ci4;
        CH(ido-1,3,k)=CC(0,k,0)+tr12*cr2+tr11*cr3;
        CH(0,4,k)=ti12*ci5-ti11*ci4;
        }
      if (ido==1) return;
      for (size_t k=0; k<l1;++k)
        for (size_t i=2, ic=ido-2; i<ido; i+=2, ic-=2)
          {
          T di2, di3, di4, di5, dr2, dr3, dr4, dr5;
          MULPM (dr2,di2,WA(0,i-2),WA(0,i-1),CC(i-1,k,1),CC(i,k,1));
          MULPM (dr3,di3,WA(1,i-2),WA(1,i-1),CC(i-1,k,2),CC(i,k,2));
          MULPM (dr4,di4,WA(2,i-2),WA(2,i-1),CC(i-1,k,3),CC(i,k,3));
          MULPM (dr5,di5,WA(3,i-2),WA(3,i-1),CC(i-1,k,4),CC(i,k,4));
          POCKETFFT_REARRANGE(dr2, di2, dr5, di5);
          POCKETFFT_REARRANGE(dr3, di3, dr4, di4);
          CH(i-1,0,k)=CC(i-1,k,0)+dr2+dr3;
          CH(i  ,0,k)=CC(i  ,k,0)+di2+di3;
          T tr2=CC(i-1,k,0)+tr11*dr2+tr12*dr3;
          T ti2=CC(i  ,k,0)+tr11*di2+tr12*di3;
          T tr3=CC(i-1,k,0)+tr12*dr2+tr11*dr3;
          T ti3=CC(i  ,k,0)+tr12*di2+tr11*di3;
          T tr5 = ti11*dr5 + ti12*dr4;
          T ti5 = ti11*di5 + ti12*di4;
          T tr4 = ti12*dr5 - ti11*dr4;
          T ti4 = ti12*di5 - ti11*di4;
          PM(CH(i-1,2,k),CH(ic-1,1,k),tr2,tr5);
          PM(CH(i  ,2,k),CH(ic  ,1,k),ti5,ti2);
          PM(CH(i-1,4,k),CH(ic-1,3,k),tr3,tr4);
          PM(CH(i  ,4,k),CH(ic  ,3,k),ti4,ti3);
          }
      }

#undef POCKETFFT_REARRANGE

    // Generic-radix real-input forward pass (for factors without a
    // hard-coded kernel); comment numbers reference the original FFTPACK
    // statement labels.
    template<typename T> void radfg(size_t ido, size_t ip, size_t l1,
      T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const T0 * POCKETFFT_RESTRICT wa, const T0 * POCKETFFT_RESTRICT csarr)
      {
      const size_t cdim=ip;
      size_t ipph=(ip+1)/2;
      size_t idl1 = ido*l1;

      auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> T&
        { return cc[a+ido*(b+cdim*c)]; };
      auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> const T&
        { return ch[a+ido*(b+l1*c)]; };
      auto C1 = [cc,ido,l1] (size_t a, size_t b, size_t c) -> T&
        { return cc[a+ido*(b+l1*c)]; };
      auto C2 = [cc,idl1] (size_t a, size_t b) -> T&
        { return cc[a+idl1*b]; };
      auto CH2 = [ch,idl1] (size_t a, size_t b) -> T&
        { return ch[a+idl1*b]; };

      if (ido>1)
        {
        // Twiddle the input columns pairwise (j and its mirror jc).
        for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc)              // 114
          {
          size_t is=(j-1)*(ido-1),
                 is2=(jc-1)*(ido-1);
          for (size_t k=0; k<l1; ++k)                            // 113
            {
            size_t idij=is;
            size_t idij2=is2;
            for (size_t i=1; i<=ido-2; i+=2)                      // 112
              {
              T t1=C1(i,k,j ), t2=C1(i+1,k,j ),
                t3=C1(i,k,jc), t4=C1(i+1,k,jc);
              T x1=wa[idij]*t1 + wa[idij+1]*t2,
                x2=wa[idij]*t2 - wa[idij+1]*t1,
                x3=wa[idij2]*t3 + wa[idij2+1]*t4,
                x4=wa[idij2]*t4 - wa[idij2+1]*t3;
              C1(i  ,k,j ) = x1+x3;
              C1(i  ,k,jc) = x2-x4;
              C1(i+1,k,j ) = x2+x4;
              C1(i+1,k,jc) = x3-x1;
              idij+=2;
              idij2+=2;
              }
            }
          }
        }

      // Symmetric/antisymmetric combinations of the i==0 column.
      for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc)                // 123
        for (size_t k=0; k<l1; ++k)                              // 122
          {
          T t1=C1(0,k,j), t2=C1(0,k,jc);
          C1(0,k,j ) = t1+t2;
          C1(0,k,jc) = t2-t1;
          }

//everything in C
//memset(ch,0,ip*l1*ido*sizeof(double));

      // Accumulate output rows, unrolled four twiddle steps at a time;
      // iang tracks the root-of-unity index mod ip.
      for (size_t l=1,lc=ip-1; l<ipph; ++l,--lc)                 // 127
        {
        for (size_t ik=0; ik<idl1; ++ik)                         // 124
          {
          CH2(ik,l ) = C2(ik,0)+csarr[2*l]*C2(ik,1)+csarr[4*l]*C2(ik,2);
          CH2(ik,lc) = csarr[2*l+1]*C2(ik,ip-1)+csarr[4*l+1]*C2(ik,ip-2);
          }
        size_t iang = 2*l;
        size_t j=3, jc=ip-3;
        for (; j<ipph-3; j+=4,jc-=4)                             // 126
          {
          iang+=l; if (iang>=ip) iang-=ip;
          T0 ar1=csarr[2*iang], ai1=csarr[2*iang+1];
          iang+=l; if (iang>=ip) iang-=ip;
          T0 ar2=csarr[2*iang], ai2=csarr[2*iang+1];
          iang+=l; if (iang>=ip) iang-=ip;
          T0 ar3=csarr[2*iang], ai3=csarr[2*iang+1];
          iang+=l; if (iang>=ip) iang-=ip;
          T0 ar4=csarr[2*iang], ai4=csarr[2*iang+1];
          for (size_t ik=0; ik<idl1; ++ik)                       // 125
            {
            CH2(ik,l ) += ar1*C2(ik,j )+ar2*C2(ik,j +1)
                         +ar3*C2(ik,j +2)+ar4*C2(ik,j +3);
            CH2(ik,lc) += ai1*C2(ik,jc)+ai2*C2(ik,jc-1)
                         +ai3*C2(ik,jc-2)+ai4*C2(ik,jc-3);
            }
          }
        for (; j<ipph-1; j+=2,jc-=2)                             // 126
          {
          iang+=l; if (iang>=ip) iang-=ip;
          T0 ar1=csarr[2*iang], ai1=csarr[2*iang+1];
          iang+=l; if (iang>=ip) iang-=ip;
          T0 ar2=csarr[2*iang], ai2=csarr[2*iang+1];
          for (size_t ik=0; ik<idl1; ++ik)                       // 125
            {
            CH2(ik,l ) += ar1*C2(ik,j )+ar2*C2(ik,j +1);
            CH2(ik,lc) += ai1*C2(ik,jc)+ai2*C2(ik,jc-1);
            }
          }
        for (; j<ipph; ++j,--jc)                                 // 126
          {
          iang+=l; if (iang>=ip) iang-=ip;
          T0 ar=csarr[2*iang], ai=csarr[2*iang+1];
          for (size_t ik=0; ik<idl1; ++ik)                       // 125
            {
            CH2(ik,l ) += ar*C2(ik,j );
            CH2(ik,lc) += ai*C2(ik,jc);
            }
          }
        }

      // DC row: plain sum over all columns.
      for (size_t ik=0; ik<idl1; ++ik)                           // 101
        CH2(ik,0) = C2(ik,0);
      for (size_t j=1; j<ipph; ++j)                              // 129
        for (size_t ik=0; ik<idl1; ++ik)                         // 128
          CH2(ik,0) += C2(ik,j);

// everything in CH at this point!
//memset(cc,0,ip*l1*ido*sizeof(double));

      // Repack CH into FFTPACK's interleaved half-complex output layout.
      for (size_t k=0; k<l1; ++k)                                // 131
        for (size_t i=0; i<ido; ++i)                             // 130
          CC(i,0,k) = CH(i,k,0);

      for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc)                // 137
        {
        size_t j2=2*j-1;
        for (size_t k=0; k<l1; ++k)                              // 136
          {
          CC(ido-1,j2,k) = CH(0,k,j);
          CC(0,j2+1,k) = CH(0,k,jc);
          }
        }

      if (ido==1) return;

      for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc)                // 140
        {
        size_t j2=2*j-1;
        for(size_t k=0; k<l1; ++k)                               // 139
          for(size_t i=1, ic=ido-i-2; i<=ido-2; i+=2, ic-=2)     // 138
            {
            CC(i   ,j2+1,k) = CH(i  ,k,j )+CH(i  ,k,jc);
            CC(ic  ,j2  ,k) = CH(i  ,k,j )-CH(i  ,k,jc);
            CC(i+1 ,j2+1,k) = CH(i+1,k,j )+CH(i+1,k,jc);
            CC(ic+1,j2  ,k) = CH(i+1,k,jc)-CH(i+1,k,j );
            }
        }
      }

    // Real-output radix-2 backward pass.
    template<typename T> void radb2(size_t ido, size_t l1,
      const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const T0 * POCKETFFT_RESTRICT wa)
      {
      constexpr size_t cdim=2;
      auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
      auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
        { return cc[a+ido*(b+cdim*c)]; };
      auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
        { return ch[a+ido*(b+l1*c)]; };

      for (size_t k=0; k<l1; k++)
        PM (CH(0,k,0),CH(0,k,1),CC(0,0,k),CC(ido-1,1,k));
      if
((ido&1)==0) for (size_t k=0; k<l1; k++) { CH(ido-1,k,0) = 2*CC(ido-1,0,k); CH(ido-1,k,1) =-2*CC(0 ,1,k); } if (ido<=2) return; for (size_t k=0; k<l1;++k) for (size_t i=2; i<ido; i+=2) { size_t ic=ido-i; T ti2, tr2; PM (CH(i-1,k,0),tr2,CC(i-1,0,k),CC(ic-1,1,k)); PM (ti2,CH(i ,k,0),CC(i ,0,k),CC(ic ,1,k)); MULPM (CH(i,k,1),CH(i-1,k,1),WA(0,i-2),WA(0,i-1),ti2,tr2); } } template<typename T> void radb3(size_t ido, size_t l1, const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch, const T0 * POCKETFFT_RESTRICT wa) { constexpr size_t cdim=3; constexpr T0 taur=-0.5, taui=T0(0.8660254037844386467637231707529362L); auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; }; auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T& { return cc[a+ido*(b+cdim*c)]; }; auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T& { return ch[a+ido*(b+l1*c)]; }; for (size_t k=0; k<l1; k++) { T tr2=2*CC(ido-1,1,k); T cr2=CC(0,0,k)+taur*tr2; CH(0,k,0)=CC(0,0,k)+tr2; T ci3=2*taui*CC(0,2,k); PM (CH(0,k,2),CH(0,k,1),cr2,ci3); } if (ido==1) return; for (size_t k=0; k<l1; k++) for (size_t i=2, ic=ido-2; i<ido; i+=2, ic-=2) { T tr2=CC(i-1,2,k)+CC(ic-1,1,k); // t2=CC(I) + conj(CC(ic)) T ti2=CC(i ,2,k)-CC(ic ,1,k); T cr2=CC(i-1,0,k)+taur*tr2; // c2=CC +taur*t2 T ci2=CC(i ,0,k)+taur*ti2; CH(i-1,k,0)=CC(i-1,0,k)+tr2; // CH=CC+t2 CH(i ,k,0)=CC(i ,0,k)+ti2; T cr3=taui*(CC(i-1,2,k)-CC(ic-1,1,k));// c3=taui*(CC(i)-conj(CC(ic))) T ci3=taui*(CC(i ,2,k)+CC(ic ,1,k)); T di2, di3, dr2, dr3; PM(dr3,dr2,cr2,ci3); // d2= (cr2-ci3, ci2+cr3) = c2+i*c3 PM(di2,di3,ci2,cr3); // d3= (cr2+ci3, ci2-cr3) = c2-i*c3 MULPM(CH(i,k,1),CH(i-1,k,1),WA(0,i-2),WA(0,i-1),di2,dr2); // ch = WA*d2 MULPM(CH(i,k,2),CH(i-1,k,2),WA(1,i-2),WA(1,i-1),di3,dr3); } } template<typename T> void radb4(size_t ido, size_t l1, const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch, const T0 * POCKETFFT_RESTRICT wa) { constexpr size_t cdim=4; constexpr T0 sqrt2=T0(1.414213562373095048801688724209698L); auto WA = 
[wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; }; auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T& { return cc[a+ido*(b+cdim*c)]; }; auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T& { return ch[a+ido*(b+l1*c)]; }; for (size_t k=0; k<l1; k++) { T tr1, tr2; PM (tr2,tr1,CC(0,0,k),CC(ido-1,3,k)); T tr3=2*CC(ido-1,1,k); T tr4=2*CC(0,2,k); PM (CH(0,k,0),CH(0,k,2),tr2,tr3); PM (CH(0,k,3),CH(0,k,1),tr1,tr4); } if ((ido&1)==0) for (size_t k=0; k<l1; k++) { T tr1,tr2,ti1,ti2; PM (ti1,ti2,CC(0 ,3,k),CC(0 ,1,k)); PM (tr2,tr1,CC(ido-1,0,k),CC(ido-1,2,k)); CH(ido-1,k,0)=tr2+tr2; CH(ido-1,k,1)=sqrt2*(tr1-ti1); CH(ido-1,k,2)=ti2+ti2; CH(ido-1,k,3)=-sqrt2*(tr1+ti1); } if (ido<=2) return; for (size_t k=0; k<l1;++k) for (size_t i=2; i<ido; i+=2) { T ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4; size_t ic=ido-i; PM (tr2,tr1,CC(i-1,0,k),CC(ic-1,3,k)); PM (ti1,ti2,CC(i ,0,k),CC(ic ,3,k)); PM (tr4,ti3,CC(i ,2,k),CC(ic ,1,k)); PM (tr3,ti4,CC(i-1,2,k),CC(ic-1,1,k)); PM (CH(i-1,k,0),cr3,tr2,tr3); PM (CH(i ,k,0),ci3,ti2,ti3); PM (cr4,cr2,tr1,tr4); PM (ci2,ci4,ti1,ti4); MULPM (CH(i,k,1),CH(i-1,k,1),WA(0,i-2),WA(0,i-1),ci2,cr2); MULPM (CH(i,k,2),CH(i-1,k,2),WA(1,i-2),WA(1,i-1),ci3,cr3); MULPM (CH(i,k,3),CH(i-1,k,3),WA(2,i-2),WA(2,i-1),ci4,cr4); } } template<typename T> void radb5(size_t ido, size_t l1, const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch, const T0 * POCKETFFT_RESTRICT wa) { constexpr size_t cdim=5; constexpr T0 tr11= T0(0.3090169943749474241022934171828191L), ti11= T0(0.9510565162951535721164393333793821L), tr12= T0(-0.8090169943749474241022934171828191L), ti12= T0(0.5877852522924731291687059546390728L); auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; }; auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T& { return cc[a+ido*(b+cdim*c)]; }; auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T& { return ch[a+ido*(b+l1*c)]; }; for (size_t k=0; k<l1; k++) { T ti5=CC(0,2,k)+CC(0,2,k); 
T ti4=CC(0,4,k)+CC(0,4,k); T tr2=CC(ido-1,1,k)+CC(ido-1,1,k); T tr3=CC(ido-1,3,k)+CC(ido-1,3,k); CH(0,k,0)=CC(0,0,k)+tr2+tr3; T cr2=CC(0,0,k)+tr11*tr2+tr12*tr3; T cr3=CC(0,0,k)+tr12*tr2+tr11*tr3; T ci4, ci5; MULPM(ci5,ci4,ti5,ti4,ti11,ti12); PM(CH(0,k,4),CH(0,k,1),cr2,ci5); PM(CH(0,k,3),CH(0,k,2),cr3,ci4); } if (ido==1) return; for (size_t k=0; k<l1;++k) for (size_t i=2, ic=ido-2; i<ido; i+=2, ic-=2) { T tr2, tr3, tr4, tr5, ti2, ti3, ti4, ti5; PM(tr2,tr5,CC(i-1,2,k),CC(ic-1,1,k)); PM(ti5,ti2,CC(i ,2,k),CC(ic ,1,k)); PM(tr3,tr4,CC(i-1,4,k),CC(ic-1,3,k)); PM(ti4,ti3,CC(i ,4,k),CC(ic ,3,k)); CH(i-1,k,0)=CC(i-1,0,k)+tr2+tr3; CH(i ,k,0)=CC(i ,0,k)+ti2+ti3; T cr2=CC(i-1,0,k)+tr11*tr2+tr12*tr3; T ci2=CC(i ,0,k)+tr11*ti2+tr12*ti3; T cr3=CC(i-1,0,k)+tr12*tr2+tr11*tr3; T ci3=CC(i ,0,k)+tr12*ti2+tr11*ti3; T ci4, ci5, cr5, cr4; MULPM(cr5,cr4,tr5,tr4,ti11,ti12); MULPM(ci5,ci4,ti5,ti4,ti11,ti12); T dr2, dr3, dr4, dr5, di2, di3, di4, di5; PM(dr4,dr3,cr3,ci4); PM(di3,di4,ci3,cr4); PM(dr5,dr2,cr2,ci5); PM(di2,di5,ci2,cr5); MULPM(CH(i,k,1),CH(i-1,k,1),WA(0,i-2),WA(0,i-1),di2,dr2); MULPM(CH(i,k,2),CH(i-1,k,2),WA(1,i-2),WA(1,i-1),di3,dr3); MULPM(CH(i,k,3),CH(i-1,k,3),WA(2,i-2),WA(2,i-1),di4,dr4); MULPM(CH(i,k,4),CH(i-1,k,4),WA(3,i-2),WA(3,i-1),di5,dr5); } } template<typename T> void radbg(size_t ido, size_t ip, size_t l1, T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch, const T0 * POCKETFFT_RESTRICT wa, const T0 * POCKETFFT_RESTRICT csarr) { const size_t cdim=ip; size_t ipph=(ip+1)/ 2; size_t idl1 = ido*l1; auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T& { return cc[a+ido*(b+cdim*c)]; }; auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T& { return ch[a+ido*(b+l1*c)]; }; auto C1 = [cc,ido,l1](size_t a, size_t b, size_t c) -> const T& { return cc[a+ido*(b+l1*c)]; }; auto C2 = [cc,idl1](size_t a, size_t b) -> T& { return cc[a+idl1*b]; }; auto CH2 = [ch,idl1](size_t a, size_t b) -> T& { return ch[a+idl1*b]; }; for (size_t k=0; k<l1; ++k) // 102 for (size_t 
i=0; i<ido; ++i) // 101 CH(i,k,0) = CC(i,0,k); for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc) // 108 { size_t j2=2*j-1; for (size_t k=0; k<l1; ++k) { CH(0,k,j ) = 2*CC(ido-1,j2,k); CH(0,k,jc) = 2*CC(0,j2+1,k); } } if (ido!=1) { for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) // 111 { size_t j2=2*j-1; for (size_t k=0; k<l1; ++k) for (size_t i=1, ic=ido-i-2; i<=ido-2; i+=2, ic-=2) // 109 { CH(i ,k,j ) = CC(i ,j2+1,k)+CC(ic ,j2,k); CH(i ,k,jc) = CC(i ,j2+1,k)-CC(ic ,j2,k); CH(i+1,k,j ) = CC(i+1,j2+1,k)-CC(ic+1,j2,k); CH(i+1,k,jc) = CC(i+1,j2+1,k)+CC(ic+1,j2,k); } } } for (size_t l=1,lc=ip-1; l<ipph; ++l,--lc) { for (size_t ik=0; ik<idl1; ++ik) { C2(ik,l ) = CH2(ik,0)+csarr[2*l]*CH2(ik,1)+csarr[4*l]*CH2(ik,2); C2(ik,lc) = csarr[2*l+1]*CH2(ik,ip-1)+csarr[4*l+1]*CH2(ik,ip-2); } size_t iang=2*l; size_t j=3,jc=ip-3; for(; j<ipph-3; j+=4,jc-=4) { iang+=l; if(iang>ip) iang-=ip; T0 ar1=csarr[2*iang], ai1=csarr[2*iang+1]; iang+=l; if(iang>ip) iang-=ip; T0 ar2=csarr[2*iang], ai2=csarr[2*iang+1]; iang+=l; if(iang>ip) iang-=ip; T0 ar3=csarr[2*iang], ai3=csarr[2*iang+1]; iang+=l; if(iang>ip) iang-=ip; T0 ar4=csarr[2*iang], ai4=csarr[2*iang+1]; for (size_t ik=0; ik<idl1; ++ik) { C2(ik,l ) += ar1*CH2(ik,j )+ar2*CH2(ik,j +1) +ar3*CH2(ik,j +2)+ar4*CH2(ik,j +3); C2(ik,lc) += ai1*CH2(ik,jc)+ai2*CH2(ik,jc-1) +ai3*CH2(ik,jc-2)+ai4*CH2(ik,jc-3); } } for(; j<ipph-1; j+=2,jc-=2) { iang+=l; if(iang>ip) iang-=ip; T0 ar1=csarr[2*iang], ai1=csarr[2*iang+1]; iang+=l; if(iang>ip) iang-=ip; T0 ar2=csarr[2*iang], ai2=csarr[2*iang+1]; for (size_t ik=0; ik<idl1; ++ik) { C2(ik,l ) += ar1*CH2(ik,j )+ar2*CH2(ik,j +1); C2(ik,lc) += ai1*CH2(ik,jc)+ai2*CH2(ik,jc-1); } } for(; j<ipph; ++j,--jc) { iang+=l; if(iang>ip) iang-=ip; T0 war=csarr[2*iang], wai=csarr[2*iang+1]; for (size_t ik=0; ik<idl1; ++ik) { C2(ik,l ) += war*CH2(ik,j ); C2(ik,lc) += wai*CH2(ik,jc); } } } for (size_t j=1; j<ipph; ++j) for (size_t ik=0; ik<idl1; ++ik) CH2(ik,0) += CH2(ik,j); for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) // 124 for (size_t 
k=0; k<l1; ++k) { CH(0,k,j ) = C1(0,k,j)-C1(0,k,jc); CH(0,k,jc) = C1(0,k,j)+C1(0,k,jc); } if (ido==1) return; for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc) // 127 for (size_t k=0; k<l1; ++k) for (size_t i=1; i<=ido-2; i+=2) { CH(i ,k,j ) = C1(i ,k,j)-C1(i+1,k,jc); CH(i ,k,jc) = C1(i ,k,j)+C1(i+1,k,jc); CH(i+1,k,j ) = C1(i+1,k,j)+C1(i ,k,jc); CH(i+1,k,jc) = C1(i+1,k,j)-C1(i ,k,jc); } // All in CH for (size_t j=1; j<ip; ++j) { size_t is = (j-1)*(ido-1); for (size_t k=0; k<l1; ++k) { size_t idij = is; for (size_t i=1; i<=ido-2; i+=2) { T t1=CH(i,k,j), t2=CH(i+1,k,j); CH(i ,k,j) = wa[idij]*t1-wa[idij+1]*t2; CH(i+1,k,j) = wa[idij]*t2+wa[idij+1]*t1; idij+=2; } } } } template<typename T> void copy_and_norm(T *c, T *p1, size_t n, T0 fct) { if (p1!=c) { if (fct!=1.) for (size_t i=0; i<n; ++i) c[i] = fct*p1[i]; else memcpy (c,p1,n*sizeof(T)); } else if (fct!=1.) for (size_t i=0; i<n; ++i) c[i] *= fct; } public: template<typename T> void forward(T c[], T0 fct) { if (length==1) { c[0]*=fct; return; } size_t n=length; size_t l1=n, nf=fact.size(); arr<T> ch(n); T *p1=c, *p2=ch.data(); for(size_t k1=0; k1<nf;++k1) { size_t k=nf-k1-1; size_t ip=fact[k].fct; size_t ido=n / l1; l1 /= ip; if(ip==4) radf4(ido, l1, p1, p2, fact[k].tw); else if(ip==2) radf2(ido, l1, p1, p2, fact[k].tw); else if(ip==3) radf3(ido, l1, p1, p2, fact[k].tw); else if(ip==5) radf5(ido, l1, p1, p2, fact[k].tw); else { radfg(ido, ip, l1, p1, p2, fact[k].tw, fact[k].tws); swap (p1,p2); } swap (p1,p2); } copy_and_norm(c,p1,n,fct); } template<typename T> void backward(T c[], T0 fct) { if (length==1) { c[0]*=fct; return; } size_t n=length; size_t l1=1, nf=fact.size(); arr<T> ch(n); T *p1=c, *p2=ch.data(); for(size_t k=0; k<nf; k++) { size_t ip = fact[k].fct, ido= n/(ip*l1); if(ip==4) radb4(ido, l1, p1, p2, fact[k].tw); else if(ip==2) radb2(ido, l1, p1, p2, fact[k].tw); else if(ip==3) radb3(ido, l1, p1, p2, fact[k].tw); else if(ip==5) radb5(ido, l1, p1, p2, fact[k].tw); else radbg(ido, ip, l1, p1, p2, fact[k].tw, 
fact[k].tws); swap (p1,p2); l1*=ip; } copy_and_norm(c,p1,n,fct); } private: void factorize() { size_t len=length; while ((len%4)==0) { add_factor(4); len>>=2; } if ((len%2)==0) { len>>=1; // factor 2 should be at the front of the factor list add_factor(2); swap(fact[0].fct, fact.back().fct); } for (size_t divisor=3; divisor*divisor<=len; divisor+=2) while ((len%divisor)==0) { add_factor(divisor); len/=divisor; } if (len>1) add_factor(len); } size_t twsize() const { size_t twsz=0, l1=1; for (size_t k=0; k<fact.size(); ++k) { size_t ip=fact[k].fct, ido=length/(l1*ip); twsz+=(ip-1)*(ido-1); if (ip>5) twsz+=2*ip; l1*=ip; } return twsz; } void comp_twiddle() { sincos_2pibyn<T0> twid(length, true); size_t l1=1; T0 *ptr=mem.data(); for (size_t k=0; k<fact.size(); ++k) { size_t ip=fact[k].fct, ido=length/(l1*ip); if (k<fact.size()-1) // last factor doesn't need twiddles { fact[k].tw=ptr; ptr+=(ip-1)*(ido-1); for (size_t j=1; j<ip; ++j) for (size_t i=1; i<=(ido-1)/2; ++i) { fact[k].tw[(j-1)*(ido-1)+2*i-2] = twid[2*j*l1*i]; fact[k].tw[(j-1)*(ido-1)+2*i-1] = twid[2*j*l1*i+1]; } } if (ip>5) // special factors required by *g functions { fact[k].tws=ptr; ptr+=2*ip; fact[k].tws[0] = 1.; fact[k].tws[1] = 0.; for (size_t i=2, ic=2*ip-2; i<=ic; i+=2, ic-=2) { fact[k].tws[i ] = twid[i*(length/ip)]; fact[k].tws[i+1] = twid[i*(length/ip)+1]; fact[k].tws[ic] = twid[i*(length/ip)]; fact[k].tws[ic+1] = -twid[i*(length/ip)+1]; } } l1*=ip; } } public: POCKETFFT_NOINLINE rfftp(size_t length_) : length(length_) { if (length==0) throw runtime_error("zero-sized FFT"); if (length==1) return; factorize(); mem.resize(twsize()); comp_twiddle(); } }; // // complex Bluestein transforms // template<typename T0> class fftblue { private: size_t n, n2; cfftp<T0> plan; arr<cmplx<T0>> mem; cmplx<T0> *bk, *bkf; template<bool fwd, typename T> void fft(cmplx<T> c[], T0 fct) { arr<cmplx<T>> akf(n2); /* initialize a_k and FFT it */ for (size_t m=0; m<n; ++m) akf[m] = c[m].template special_mul<fwd>(bk[m]); auto 
zero = akf[0]*T0(0); for (size_t m=n; m<n2; ++m) akf[m]=zero; plan.forward (akf.data(),1.); /* do the convolution */ for (size_t m=0; m<n2; ++m) akf[m] = akf[m].template special_mul<!fwd>(bkf[m]); /* inverse FFT */ plan.backward (akf.data(),1.); /* multiply by b_k */ for (size_t m=0; m<n; ++m) c[m] = akf[m].template special_mul<fwd>(bk[m])*fct; } public: POCKETFFT_NOINLINE fftblue(size_t length) : n(length), n2(util::good_size(n*2-1)), plan(n2), mem(n+n2), bk(mem.data()), bkf(mem.data()+n) { /* initialize b_k */ sincos_2pibyn<T0> tmp_(2*n, false); auto tmp = tmp_.cdata(); bk[0].Set(1, 0); size_t coeff=0; for (size_t m=1; m<n; ++m) { coeff+=2*m-1; if (coeff>=2*n) coeff-=2*n; bk[m] = tmp[coeff]; } /* initialize the zero-padded, Fourier transformed b_k. Add normalisation. */ T0 xn2 = T0(1)/T0(n2); bkf[0] = bk[0]*xn2; for (size_t m=1; m<n; ++m) bkf[m] = bkf[n2-m] = bk[m]*xn2; for (size_t m=n;m<=(n2-n);++m) bkf[m].Set(0.,0.); plan.forward(bkf,1.); } template<typename T> void backward(cmplx<T> c[], T0 fct) { fft<false>(c,fct); } template<typename T> void forward(cmplx<T> c[], T0 fct) { fft<true>(c,fct); } template<typename T> void backward_r(T c[], T0 fct) { arr<cmplx<T>> tmp(n); tmp[0].Set(c[0],c[0]*0); memcpy (reinterpret_cast<void *>(tmp.data()+1), reinterpret_cast<void *>(c+1), (n-1)*sizeof(T)); if ((n&1)==0) tmp[n/2].i=T0(0)*c[0]; for (size_t m=1; 2*m<n; ++m) tmp[n-m].Set(tmp[m].r, -tmp[m].i); fft<false>(tmp.data(),fct); for (size_t m=0; m<n; ++m) c[m] = tmp[m].r; } template<typename T> void forward_r(T c[], T0 fct) { arr<cmplx<T>> tmp(n); auto zero = T0(0)*c[0]; for (size_t m=0; m<n; ++m) tmp[m].Set(c[m], zero); fft<true>(tmp.data(),fct); c[0] = tmp[0].r; memcpy (c+1, tmp.data()+1, (n-1)*sizeof(T)); } }; // // flexible (FFTPACK/Bluestein) complex 1D transform // template<typename T0> class pocketfft_c { private: unique_ptr<cfftp<T0>> packplan; unique_ptr<fftblue<T0>> blueplan; size_t len; public: POCKETFFT_NOINLINE pocketfft_c(size_t length) : len(length) { if 
(length==0) throw runtime_error("zero-length FFT requested"); size_t tmp = (length<50) ? 0 : util::largest_prime_factor(length); if (tmp*tmp <= length) { packplan=unique_ptr<cfftp<T0>>(new cfftp<T0>(length)); return; } double comp1 = util::cost_guess(length); double comp2 = 2*util::cost_guess(util::good_size(2*length-1)); comp2*=1.5; /* fudge factor that appears to give good overall performance */ if (comp2<comp1) // use Bluestein blueplan=unique_ptr<fftblue<T0>>(new fftblue<T0>(length)); else packplan=unique_ptr<cfftp<T0>>(new cfftp<T0>(length)); } template<typename T> POCKETFFT_NOINLINE void backward(cmplx<T> c[], T0 fct) { packplan ? packplan->backward(c,fct) : blueplan->backward(c,fct); } template<typename T> POCKETFFT_NOINLINE void forward(cmplx<T> c[], T0 fct) { packplan ? packplan->forward(c,fct) : blueplan->forward(c,fct); } size_t length() const { return len; } }; // // flexible (FFTPACK/Bluestein) real-valued 1D transform // template<typename T0> class pocketfft_r { private: unique_ptr<rfftp<T0>> packplan; unique_ptr<fftblue<T0>> blueplan; size_t len; public: POCKETFFT_NOINLINE pocketfft_r(size_t length) : len(length) { if (length==0) throw runtime_error("zero-length FFT requested"); size_t tmp = (length<50) ? 0 : util::largest_prime_factor(length); if (tmp*tmp <= length) { packplan=unique_ptr<rfftp<T0>>(new rfftp<T0>(length)); return; } double comp1 = 0.5*util::cost_guess(length); double comp2 = 2*util::cost_guess(util::good_size(2*length-1)); comp2*=1.5; /* fudge factor that appears to give good overall performance */ if (comp2<comp1) // use Bluestein blueplan=unique_ptr<fftblue<T0>>(new fftblue<T0>(length)); else packplan=unique_ptr<rfftp<T0>>(new rfftp<T0>(length)); } template<typename T> POCKETFFT_NOINLINE void backward(T c[], T0 fct) { packplan ? packplan->backward(c,fct) : blueplan->backward_r(c,fct); } template<typename T> POCKETFFT_NOINLINE void forward(T c[], T0 fct) { packplan ? 
packplan->forward(c,fct) : blueplan->forward_r(c,fct); }
    // NOTE(review): the line above completes pocketfft_r::forward(), whose
    // signature begins on the preceding source line.
    size_t length() const { return len; }
  };

//
// multi-D infrastructure
//

/* Return a transform plan of type T for the given length.
   When plan caching is enabled (POCKETFFT_CACHE_SIZE > 0), a small,
   mutex-protected LRU cache of shared_ptr plans is consulted first, so
   repeated transforms of the same length reuse the already-built plan. */
template<typename T> shared_ptr<T> get_plan(size_t length)
  {
#if POCKETFFT_CACHE_SIZE==0
  return make_shared<T>(length);
#else
  constexpr size_t nmax=POCKETFFT_CACHE_SIZE;
  static array<shared_ptr<T>, nmax> cache;
  static array<size_t, nmax> last_access{{0}};  // LRU timestamps, 0 == empty/oldest
  static size_t access_counter = 0;
  static mutex mut;                             // guards all of the statics above

  // Linear scan of the cache; refreshes the LRU timestamp on a hit.
  auto find_in_cache = [&]() -> shared_ptr<T>
    {
    for (size_t i=0; i<nmax; ++i)
      if (cache[i] && (cache[i]->length()==length))
        {
        // no need to update if this is already the most recent entry
        if (last_access[i]!=access_counter)
          {
          last_access[i] = ++access_counter;
          // Guard against overflow
          if (access_counter == 0)
            last_access.fill(0);
          }
        return cache[i];
        }
    return nullptr;
    };

  {
  lock_guard<mutex> lock(mut);
  auto p = find_in_cache();
  if (p) return p;
  }
  // Cache miss: construct the plan outside the lock (construction can be
  // expensive), then re-check under the lock before inserting.
  auto plan = make_shared<T>(length);
  {
  lock_guard<mutex> lock(mut);
  // Another thread may have inserted a plan for this length meanwhile.
  auto p = find_in_cache();
  if (p) return p;
  // Evict the least recently used slot.
  size_t lru = 0;
  for (size_t i=1; i<nmax; ++i)
    if (last_access[i] < last_access[lru])
      lru = i;
  cache[lru] = plan;
  last_access[lru] = ++access_counter;
  }
  return plan;
#endif
  }

/* Shape-plus-strides description of an n-dimensional array.
   Strides are ptrdiff_t; the accessors below are used by the iterator
   classes to compute raw byte offsets (see cndarr::operator[]). */
class arr_info
  {
  protected:
    shape_t shp;
    stride_t str;

  public:
    arr_info(const shape_t &shape_, const stride_t &stride_)
      : shp(shape_), str(stride_) {}
    size_t ndim() const { return shp.size(); }
    size_t size() const { return util::prod(shp); }
    const shape_t &shape() const { return shp; }
    size_t shape(size_t i) const { return shp[i]; }
    const stride_t &stride() const { return str; }
    const ptrdiff_t &stride(size_t i) const { return str[i]; }
  };

/* Read-only view of an n-d array of T, indexed by a raw byte offset
   (hence the char* base pointer). */
template<typename T> class cndarr: public arr_info
  {
  protected:
    const char *d;   // base address kept as char* for byte-offset arithmetic

  public:
    cndarr(const void *data_, const shape_t &shape_, const stride_t &stride_)
      : arr_info(shape_, stride_),
        d(reinterpret_cast<const char *>(data_)) {}
    const T &operator[](ptrdiff_t ofs) const
      { return *reinterpret_cast<const T *>(d+ofs); }
  };

/* Mutable counterpart of cndarr (definition continues on the next
   source line). */
template<typename T> class ndarr: public cndarr<T>
  {
  public:
ndarr(void *data_, const shape_t &shape_, const stride_t &stride_)
      : cndarr<T>::cndarr(const_cast<const void *>(data_), shape_, stride_)
      {}
    T &operator[](ptrdiff_t ofs)
      { return *reinterpret_cast<T *>(const_cast<char *>(cndarr<T>::d+ofs)); }
  };

/* Iterates over all 1-D "lines" along one axis (idim) of an input and an
   output array simultaneously, keeping up to N line-start offsets buffered
   (advance(n) fills p_i/p_o with n consecutive line starts, enabling the
   vectorized code paths).  The constructor also splits the set of lines
   across threads via util::nthreads()/util::thread_num(). */
template<size_t N> class multi_iter
  {
  private:
    shape_t pos;                     // current multi-index over the non-idim axes
    const arr_info &iarr, &oarr;
    ptrdiff_t p_ii, p_i[N], str_i, p_oi, p_o[N], str_o;
    size_t idim, rem;                // rem: lines still to be visited by this thread

    // Step to the next line: odometer-style increment over all axes except idim.
    void advance_i()
      {
      for (int i_=int(pos.size())-1; i_>=0; --i_)
        {
        auto i = size_t(i_);
        if (i==idim) continue;
        p_ii += iarr.stride(i);
        p_oi += oarr.stride(i);
        if (++pos[i] < iarr.shape(i))
          return;
        pos[i] = 0;
        p_ii -= ptrdiff_t(iarr.shape(i))*iarr.stride(i);
        p_oi -= ptrdiff_t(oarr.shape(i))*oarr.stride(i);
        }
      }

  public:
    multi_iter(const arr_info &iarr_, const arr_info &oarr_, size_t idim_)
      : pos(iarr_.ndim(), 0), iarr(iarr_), oarr(oarr_), p_ii(0),
        str_i(iarr.stride(idim_)), p_oi(0), str_o(oarr.stride(idim_)),
        idim(idim_), rem(iarr.size()/iarr.shape(idim))
      {
      auto nshares = util::nthreads();
      if (nshares==1) return;        // single-threaded: keep the full range
      if (nshares==0) throw runtime_error("can't run with zero threads");
      auto myshare = util::thread_num();
      if (myshare>=nshares) throw runtime_error("impossible share requested");
      // Split `rem` lines into nshares near-equal chunks; the first
      // `additional` shares get one extra line.
      size_t nbase = rem/nshares;
      size_t additional = rem%nshares;
      size_t lo = myshare*nbase + ((myshare<additional) ? myshare : additional);
      size_t hi = lo+nbase+(myshare<additional);
      size_t todo = hi-lo;
      // Fast-forward pos/p_ii/p_oi to line index `lo` without iterating.
      size_t chunk = rem;
      for (size_t i=0; i<pos.size(); ++i)
        {
        if (i==idim) continue;
        chunk /= iarr.shape(i);
        size_t n_advance = lo/chunk;
        pos[i] += n_advance;
        p_ii += ptrdiff_t(n_advance)*iarr.stride(i);
        p_oi += ptrdiff_t(n_advance)*oarr.stride(i);
        lo -= n_advance*chunk;
        }
      rem = todo;
      }
    /* Buffer the next n line-start offsets into p_i/p_o. */
    void advance(size_t n)
      {
      if (rem<n) throw runtime_error("underrun");
      for (size_t i=0; i<n; ++i)
        {
        p_i[i] = p_ii;
        p_o[i] = p_oi;
        advance_i();
        }
      rem -= n;
      }
    // Offset of element i in the first / j-th buffered input line.
    ptrdiff_t iofs(size_t i) const { return p_i[0] + ptrdiff_t(i)*str_i; }
    ptrdiff_t iofs(size_t j, size_t i) const { return p_i[j] + ptrdiff_t(i)*str_i; }
    // Offset of element i in the first / j-th buffered output line.
    ptrdiff_t oofs(size_t i) const { return p_o[0] + ptrdiff_t(i)*str_o; }
    ptrdiff_t oofs(size_t j, size_t i) const { return p_o[j] + ptrdiff_t(i)*str_o; }
    size_t length_in() const { return iarr.shape(idim); }
    size_t length_out() const { return oarr.shape(idim); }
    ptrdiff_t stride_in() const { return str_i; }
    ptrdiff_t stride_out() const { return str_o; }
    size_t remaining() const { return rem; }
  };

/* Plain odometer iterator over every element of a single array. */
class simple_iter
  {
  private:
    shape_t pos;
    const arr_info &arr;
    ptrdiff_t p;     // byte offset of the current element
    size_t rem;      // elements still to visit

  public:
    simple_iter(const arr_info &arr_)
      : pos(arr_.ndim(), 0), arr(arr_), p(0), rem(arr_.size()) {}
    void advance()
      {
      --rem;
      for (int i_=int(pos.size())-1; i_>=0; --i_)
        {
        auto i = size_t(i_);
        p += arr.stride(i);
        if (++pos[i] < arr.shape(i))
          return;
        pos[i] = 0;
        p -= ptrdiff_t(arr.shape(i))*arr.stride(i);
        }
      }
    ptrdiff_t ofs() const { return p; }
    size_t remaining() const { return rem; }
  };

/* Iterator that walks the half-sized array (last transformed axis truncated
   to n/2+1) while simultaneously tracking the offset of the index-reversed
   element along all transformed axes.
   NOTE(review): presumably used to fill in Hermitian-symmetric conjugate
   entries — confirm against the callers, which are outside this chunk. */
class rev_iter
  {
  private:
    shape_t pos;
    const arr_info &arr;
    vector<char> rev_axis;   // 1 for axes whose index must be mirrored
    vector<char> rev_jump;   // pending wrap-around correction per axis
    size_t last_axis, last_size;
    shape_t shp;             // iteration shape (last transformed axis halved)
    ptrdiff_t p, rp;         // forward offset / mirrored offset
    size_t rem;

  public:
    rev_iter(const arr_info &arr_, const shape_t &axes)
      : pos(arr_.ndim(), 0), arr(arr_), rev_axis(arr_.ndim(), 0),
        rev_jump(arr_.ndim(), 1), p(0), rp(0)
      {
      for (auto ax: axes)
        rev_axis[ax]=1;
      last_axis = axes.back();
      last_size = arr.shape(last_axis)/2 + 1;
      shp = arr.shape();
      shp[last_axis] = last_size;
      rem=1;
      for (auto i: shp)
        rem *= i;
      }
    void advance()
      {
      --rem;
      for (int i_=int(pos.size())-1; i_>=0; --i_)
        {
        auto i = size_t(i_);
        p += arr.stride(i);
        if (!rev_axis[i])
          rp += arr.stride(i);         // non-mirrored axis: move forward
        else
          {
          rp -= arr.stride(i);         // mirrored axis: move backward
          if (rev_jump[i])
            {
            // first step on this axis: jump from index 0 to index shape-1
            rp += ptrdiff_t(arr.shape(i))*arr.stride(i);
            rev_jump[i] = 0;
            }
          }
        if (++pos[i] < shp[i])
          return;
        pos[i] = 0;
        p -= ptrdiff_t(shp[i])*arr.stride(i);
        if (rev_axis[i])
          {
          rp -= ptrdiff_t(arr.shape(i)-shp[i])*arr.stride(i);
          rev_jump[i] = 1;
          }
        else
          rp -= ptrdiff_t(shp[i])*arr.stride(i);
        }
      }
    ptrdiff_t ofs() const { return p; }
    ptrdiff_t rev_ofs() const { return rp; }
    size_t remaining() const { return rem; }
  };

#ifndef POCKETFFT_NO_VECTORS
/* Maps a scalar type to the matching GCC/Clang extended vector type with
   VLEN<T>::val lanes (used by the vectorized transform paths). */
template<typename T> struct VTYPE {};
template<> struct VTYPE<float>
  { using type = float __attribute__ ((vector_size (VLEN<float>::val*sizeof(float)))); };
template<> struct VTYPE<double>
  { using type = double __attribute__ ((vector_size (VLEN<double>::val*sizeof(double)))); };
template<> struct VTYPE<long double>
  { using type = long double __attribute__ ((vector_size (VLEN<long double>::val*sizeof(long double)))); };
#endif

/* Scratch buffer for transforming one line along an axis of size axsize.
   Sized for VLEN<T>::val lanes when enough lines exist to use the
   vectorized path, otherwise for a single scalar line. */
template<typename T> arr<char> alloc_tmp(const shape_t &shape,
  size_t axsize, size_t elemsize)
  {
  auto othersize = util::prod(shape)/axsize;
  auto tmpsize = axsize*((othersize>=VLEN<T>::val) ? VLEN<T>::val : 1);
  return arr<char>(tmpsize*elemsize);
  }
/* Multi-axis variant: one buffer big enough for the largest axis
   (completion of the expression is on the next source line). */
template<typename T> arr<char> alloc_tmp(const shape_t &shape,
  const shape_t &axes, size_t elemsize)
  {
  size_t fullsize=util::prod(shape);
  size_t tmpsize=0;
  for (size_t i=0; i<axes.size(); ++i)
    {
    auto axsize = shape[axes[i]];
    auto othersize = fullsize/axsize;
    auto sz = axsize*((othersize>=VLEN<T>::val) ?
VLEN<T>::val : 1); if (sz>tmpsize) tmpsize=sz; } return arr<char>(tmpsize*elemsize); } #ifdef POCKETFFT_OPENMP #define POCKETFFT_NTHREADS nthreads #else #define POCKETFFT_NTHREADS #endif template<typename T> POCKETFFT_NOINLINE void general_c( const cndarr<cmplx<T>> &in, ndarr<cmplx<T>> &out, const shape_t &axes, bool forward, T fct, size_t POCKETFFT_NTHREADS) { shared_ptr<pocketfft_c<T>> plan; for (size_t iax=0; iax<axes.size(); ++iax) { constexpr auto vlen = VLEN<T>::val; size_t len=in.shape(axes[iax]); if ((!plan) || (len!=plan->length())) plan = get_plan<pocketfft_c<T>>(len); #ifdef POCKETFFT_OPENMP #pragma omp parallel num_threads(util::thread_count(nthreads, in.shape(), axes[iax])) #endif { auto storage = alloc_tmp<T>(in.shape(), len, sizeof(cmplx<T>)); const auto &tin(iax==0? in : out); multi_iter<vlen> it(tin, out, axes[iax]); #ifndef POCKETFFT_NO_VECTORS if (vlen>1) while (it.remaining()>=vlen) { using vtype = typename VTYPE<T>::type; it.advance(vlen); auto tdatav = reinterpret_cast<cmplx<vtype> *>(storage.data()); for (size_t i=0; i<len; ++i) for (size_t j=0; j<vlen; ++j) { tdatav[i].r[j] = tin[it.iofs(j,i)].r; tdatav[i].i[j] = tin[it.iofs(j,i)].i; } forward ? plan->forward (tdatav, fct) : plan->backward(tdatav, fct); for (size_t i=0; i<len; ++i) for (size_t j=0; j<vlen; ++j) out[it.oofs(j,i)].Set(tdatav[i].r[j],tdatav[i].i[j]); } #endif while (it.remaining()>0) { it.advance(1); auto tdata = reinterpret_cast<cmplx<T> *>(storage.data()); if ((&tin[0]==&out[0]) && (it.stride_out()==sizeof(cmplx<T>))) // fully in-place forward ? plan->forward (&out[it.oofs(0)], fct) : plan->backward(&out[it.oofs(0)], fct); else if (it.stride_out()==sizeof(cmplx<T>)) // compute FFT in output location { for (size_t i=0; i<len; ++i) out[it.oofs(i)] = tin[it.iofs(i)]; forward ? plan->forward (&out[it.oofs(0)], fct) : plan->backward(&out[it.oofs(0)], fct); } else { for (size_t i=0; i<len; ++i) tdata[i] = tin[it.iofs(i)]; forward ? 
plan->forward (tdata, fct) : plan->backward(tdata, fct); for (size_t i=0; i<len; ++i) out[it.oofs(i)] = tdata[i]; } } } // end of parallel region fct = T(1); // factor has been applied, use 1 for remaining axes } } template<typename T> POCKETFFT_NOINLINE void general_hartley( const cndarr<T> &in, ndarr<T> &out, const shape_t &axes, T fct, size_t POCKETFFT_NTHREADS) { shared_ptr<pocketfft_r<T>> plan; for (size_t iax=0; iax<axes.size(); ++iax) { constexpr auto vlen = VLEN<T>::val; size_t len=in.shape(axes[iax]); if ((!plan) || (len!=plan->length())) plan = get_plan<pocketfft_r<T>>(len); #ifdef POCKETFFT_OPENMP #pragma omp parallel num_threads(util::thread_count(nthreads, in.shape(), axes[iax])) #endif { auto storage = alloc_tmp<T>(in.shape(), len, sizeof(T)); const auto &tin(iax==0 ? in : out); multi_iter<vlen> it(tin, out, axes[iax]); #ifndef POCKETFFT_NO_VECTORS if (vlen>1) while (it.remaining()>=vlen) { using vtype = typename VTYPE<T>::type; it.advance(vlen); auto tdatav = reinterpret_cast<vtype *>(storage.data()); for (size_t i=0; i<len; ++i) for (size_t j=0; j<vlen; ++j) tdatav[i][j] = tin[it.iofs(j,i)]; plan->forward(tdatav, fct); for (size_t j=0; j<vlen; ++j) out[it.oofs(j,0)] = tdatav[0][j]; size_t i=1, i1=1, i2=len-1; for (i=1; i<len-1; i+=2, ++i1, --i2) for (size_t j=0; j<vlen; ++j) { out[it.oofs(j,i1)] = tdatav[i][j]+tdatav[i+1][j]; out[it.oofs(j,i2)] = tdatav[i][j]-tdatav[i+1][j]; } if (i<len) for (size_t j=0; j<vlen; ++j) out[it.oofs(j,i1)] = tdatav[i][j]; } #endif while (it.remaining()>0) { it.advance(1); auto tdata = reinterpret_cast<T *>(storage.data()); for (size_t i=0; i<len; ++i) tdata[i] = tin[it.iofs(i)]; plan->forward(tdata, fct); // Hartley order out[it.oofs(0)] = tdata[0]; size_t i=1, i1=1, i2=len-1; for (i=1; i<len-1; i+=2, ++i1, --i2) { out[it.oofs(i1)] = tdata[i]+tdata[i+1]; out[it.oofs(i2)] = tdata[i]-tdata[i+1]; } if (i<len) out[it.oofs(i1)] = tdata[i]; } } // end of parallel region fct = T(1); // factor has been applied, use 1 for 
remaining axes } } template<typename T> POCKETFFT_NOINLINE void general_r2c( const cndarr<T> &in, ndarr<cmplx<T>> &out, size_t axis, bool forward, T fct, size_t POCKETFFT_NTHREADS) { auto plan = get_plan<pocketfft_r<T>>(in.shape(axis)); constexpr auto vlen = VLEN<T>::val; size_t len=in.shape(axis); #ifdef POCKETFFT_OPENMP #pragma omp parallel num_threads(util::thread_count(nthreads, in.shape(), axis)) #endif { auto storage = alloc_tmp<T>(in.shape(), len, sizeof(T)); multi_iter<vlen> it(in, out, axis); #ifndef POCKETFFT_NO_VECTORS if (vlen>1) while (it.remaining()>=vlen) { using vtype = typename VTYPE<T>::type; it.advance(vlen); auto tdatav = reinterpret_cast<vtype *>(storage.data()); for (size_t i=0; i<len; ++i) for (size_t j=0; j<vlen; ++j) tdatav[i][j] = in[it.iofs(j,i)]; plan->forward(tdatav, fct); for (size_t j=0; j<vlen; ++j) out[it.oofs(j,0)].Set(tdatav[0][j]); size_t i=1, ii=1; if (forward) for (; i<len-1; i+=2, ++ii) for (size_t j=0; j<vlen; ++j) out[it.oofs(j,ii)].Set(tdatav[i][j], tdatav[i+1][j]); else for (; i<len-1; i+=2, ++ii) for (size_t j=0; j<vlen; ++j) out[it.oofs(j,ii)].Set(tdatav[i][j], -tdatav[i+1][j]); if (i<len) for (size_t j=0; j<vlen; ++j) out[it.oofs(j,ii)].Set(tdatav[i][j]); } #endif while (it.remaining()>0) { it.advance(1); auto tdata = reinterpret_cast<T *>(storage.data()); for (size_t i=0; i<len; ++i) tdata[i] = in[it.iofs(i)]; plan->forward(tdata, fct); out[it.oofs(0)].Set(tdata[0]); size_t i=1, ii=1; if (forward) for (; i<len-1; i+=2, ++ii) out[it.oofs(ii)].Set(tdata[i], tdata[i+1]); else for (; i<len-1; i+=2, ++ii) out[it.oofs(ii)].Set(tdata[i], -tdata[i+1]); if (i<len) out[it.oofs(ii)].Set(tdata[i]); } } // end of parallel region } template<typename T> POCKETFFT_NOINLINE void general_c2r( const cndarr<cmplx<T>> &in, ndarr<T> &out, size_t axis, bool forward, T fct, size_t POCKETFFT_NTHREADS) { auto plan = get_plan<pocketfft_r<T>>(out.shape(axis)); constexpr auto vlen = VLEN<T>::val; size_t len=out.shape(axis); #ifdef POCKETFFT_OPENMP 
#pragma omp parallel num_threads(util::thread_count(nthreads, in.shape(), axis)) #endif { auto storage = alloc_tmp<T>(out.shape(), len, sizeof(T)); multi_iter<vlen> it(in, out, axis); #ifndef POCKETFFT_NO_VECTORS if (vlen>1) while (it.remaining()>=vlen) { using vtype = typename VTYPE<T>::type; it.advance(vlen); auto tdatav = reinterpret_cast<vtype *>(storage.data()); for (size_t j=0; j<vlen; ++j) tdatav[0][j]=in[it.iofs(j,0)].r; { size_t i=1, ii=1; if (forward) for (; i<len-1; i+=2, ++ii) for (size_t j=0; j<vlen; ++j) { tdatav[i ][j] = in[it.iofs(j,ii)].r; tdatav[i+1][j] = -in[it.iofs(j,ii)].i; } else for (; i<len-1; i+=2, ++ii) for (size_t j=0; j<vlen; ++j) { tdatav[i ][j] = in[it.iofs(j,ii)].r; tdatav[i+1][j] = in[it.iofs(j,ii)].i; } if (i<len) for (size_t j=0; j<vlen; ++j) tdatav[i][j] = in[it.iofs(j,ii)].r; } plan->backward(tdatav, fct); for (size_t i=0; i<len; ++i) for (size_t j=0; j<vlen; ++j) out[it.oofs(j,i)] = tdatav[i][j]; } #endif while (it.remaining()>0) { it.advance(1); auto tdata = reinterpret_cast<T *>(storage.data()); tdata[0]=in[it.iofs(0)].r; { size_t i=1, ii=1; if (forward) for (; i<len-1; i+=2, ++ii) { tdata[i ] = in[it.iofs(ii)].r; tdata[i+1] = -in[it.iofs(ii)].i; } else for (; i<len-1; i+=2, ++ii) { tdata[i ] = in[it.iofs(ii)].r; tdata[i+1] = in[it.iofs(ii)].i; } if (i<len) tdata[i] = in[it.iofs(ii)].r; } plan->backward(tdata, fct); for (size_t i=0; i<len; ++i) out[it.oofs(i)] = tdata[i]; } } // end of parallel region } template<typename T> POCKETFFT_NOINLINE void general_r( const cndarr<T> &in, ndarr<T> &out, const shape_t &axes, bool r2c, bool forward, T fct, size_t POCKETFFT_NTHREADS) { shared_ptr<pocketfft_r<T>> plan; for (size_t iax=0; iax<axes.size(); ++iax) { constexpr auto vlen = VLEN<T>::val; size_t len=in.shape(axes[iax]); if ((!plan) || (len!=plan->length())) plan = get_plan<pocketfft_r<T>>(len); #ifdef POCKETFFT_OPENMP #pragma omp parallel num_threads(util::thread_count(nthreads, in.shape(), axes[iax])) #endif { auto storage = 
alloc_tmp<T>(in.shape(), len, sizeof(T)); const auto &tin(iax==0 ? in : out); multi_iter<vlen> it(tin, out, axes[iax]); #ifndef POCKETFFT_NO_VECTORS if (vlen>1) while (it.remaining()>=vlen) { using vtype = typename VTYPE<T>::type; it.advance(vlen); auto tdatav = reinterpret_cast<vtype *>(storage.data()); for (size_t i=0; i<len; ++i) for (size_t j=0; j<vlen; ++j) tdatav[i][j] = tin[it.iofs(j,i)]; if ((!r2c) && forward) for (size_t i=2; i<len; i+=2) for (size_t j=0; j<vlen; ++j) tdatav[i][j] = -tdatav[i][j]; forward ? plan->forward (tdatav, fct) : plan->backward(tdatav, fct); if (r2c && (!forward)) for (size_t i=2; i<len; i+=2) for (size_t j=0; j<vlen; ++j) tdatav[i][j] = -tdatav[i][j]; for (size_t i=0; i<len; ++i) for (size_t j=0; j<vlen; ++j) out[it.oofs(j,i)] = tdatav[i][j]; } #endif while (it.remaining()>0) { it.advance(1); auto tdata = reinterpret_cast<T *>(storage.data()); if ((&tin[0]==&out[0]) && (it.stride_out()==sizeof(T))) // fully in-place { if ((!r2c) && forward) for (size_t i=2; i<len; i+=2) out[it.oofs(i)] = -out[it.oofs(i)]; forward ? plan->forward (&out[it.oofs(0)], fct) : plan->backward(&out[it.oofs(0)], fct); if (r2c && (!forward)) for (size_t i=2; i<len; i+=2) out[it.oofs(i)] = -out[it.oofs(i)]; } else if (it.stride_out()==sizeof(T)) // compute FFT in output location { for (size_t i=0; i<len; ++i) out[it.oofs(i)] = tin[it.iofs(i)]; if ((!r2c) && forward) for (size_t i=2; i<len; i+=2) out[it.oofs(i)] = -out[it.oofs(i)]; forward ? plan->forward (&out[it.oofs(0)], fct) : plan->backward(&out[it.oofs(0)], fct); if (r2c && (!forward)) for (size_t i=2; i<len; i+=2) out[it.oofs(i)] = -out[it.oofs(i)]; } else { for (size_t i=0; i<len; ++i) tdata[i] = tin[it.iofs(i)]; if ((!r2c) && forward) for (size_t i=2; i<len; i+=2) tdata[i] = -tdata[i]; forward ? 
plan->forward (tdata, fct) : plan->backward(tdata, fct); if (r2c && (!forward)) for (size_t i=2; i<len; i+=2) tdata[i] = -tdata[i]; for (size_t i=0; i<len; ++i) out[it.oofs(i)] = tdata[i]; } } } // end of parallel region fct = T(1); // factor has been applied, use 1 for remaining axes } } #undef POCKETFFT_NTHREADS template<typename T> void c2c(const shape_t &shape, const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes, bool forward, const complex<T> *data_in, complex<T> *data_out, T fct, size_t nthreads=1) { if (util::prod(shape)==0) return; util::sanity_check(shape, stride_in, stride_out, data_in==data_out, axes); cndarr<cmplx<T>> ain(data_in, shape, stride_in); ndarr<cmplx<T>> aout(data_out, shape, stride_out); general_c(ain, aout, axes, forward, fct, nthreads); } template<typename T> void r2c(const shape_t &shape_in, const stride_t &stride_in, const stride_t &stride_out, size_t axis, bool forward, const T *data_in, complex<T> *data_out, T fct, size_t nthreads=1) { if (util::prod(shape_in)==0) return; util::sanity_check(shape_in, stride_in, stride_out, false, axis); cndarr<T> ain(data_in, shape_in, stride_in); shape_t shape_out(shape_in); shape_out[axis] = shape_in[axis]/2 + 1; ndarr<cmplx<T>> aout(data_out, shape_out, stride_out); general_r2c(ain, aout, axis, forward, fct, nthreads); } template<typename T> void r2c(const shape_t &shape_in, const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes, bool forward, const T *data_in, complex<T> *data_out, T fct, size_t nthreads=1) { if (util::prod(shape_in)==0) return; util::sanity_check(shape_in, stride_in, stride_out, false, axes); r2c(shape_in, stride_in, stride_out, axes.back(), forward, data_in, data_out, fct, nthreads); if (axes.size()==1) return; shape_t shape_out(shape_in); shape_out[axes.back()] = shape_in[axes.back()]/2 + 1; auto newaxes = shape_t{axes.begin(), --axes.end()}; c2c(shape_out, stride_out, stride_out, newaxes, forward, data_out, data_out, T(1), nthreads); } 
/* Complex-to-real FFT along a single axis.
 *
 * `shape_out` describes the real output array; along `axis` the complex
 * input is expected in Hermitian-packed form with shape_out[axis]/2 + 1
 * entries.  `fct` is a scalar factor passed through to the backend.
 * sanity_check is called with inplace==false, i.e. in-place operation is
 * rejected for this transform.
 */
template<typename T> void c2r(const shape_t &shape_out,
  const stride_t &stride_in, const stride_t &stride_out, size_t axis,
  bool forward, const complex<T> *data_in, T *data_out, T fct,
  size_t nthreads=1)
  {
  if (util::prod(shape_out)==0) return;  // empty array: nothing to do
  util::sanity_check(shape_out, stride_in, stride_out, false, axis);
  shape_t shape_in(shape_out);
  shape_in[axis] = shape_out[axis]/2 + 1;  // Hermitian half-spectrum length
  cndarr<cmplx<T>> ain(data_in, shape_in, stride_in);
  ndarr<T> aout(data_out, shape_out, stride_out);
  general_c2r(ain, aout, axis, forward, fct, nthreads);
  }

/* Multi-axis complex-to-real FFT.
 *
 * All axes except the last are transformed by an unnormalized c2c pass
 * into a contiguous temporary buffer; the last axis is then handled by the
 * single-axis c2r overload above, which applies `fct` exactly once.
 */
template<typename T> void c2r(const shape_t &shape_out,
  const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes,
  bool forward, const complex<T> *data_in, T *data_out, T fct,
  size_t nthreads=1)
  {
  if (util::prod(shape_out)==0) return;
  if (axes.size()==1)
    // Single axis: delegate directly, no temporary needed.
    return c2r(shape_out, stride_in, stride_out, axes[0], forward,
      data_in, data_out, fct, nthreads);
  util::sanity_check(shape_out, stride_in, stride_out, false, axes);
  auto shape_in = shape_out;
  shape_in[axes.back()] = shape_out[axes.back()]/2 + 1;
  auto nval = util::prod(shape_in);
  // Build C-contiguous byte strides for the temporary complex buffer.
  stride_t stride_inter(shape_in.size());
  stride_inter.back() = sizeof(cmplx<T>);
  for (int i=int(shape_in.size())-2; i>=0; --i)
    stride_inter[size_t(i)] =
      stride_inter[size_t(i+1)]*ptrdiff_t(shape_in[size_t(i+1)]);
  arr<complex<T>> tmp(nval);
  auto newaxes = shape_t({axes.begin(), --axes.end()});  // all but last axis
  // Factor T(1) here: the normalization `fct` is applied by the final pass.
  c2c(shape_in, stride_in, stride_inter, newaxes, forward, data_in,
    tmp.data(), T(1), nthreads);
  c2r(shape_out, stride_inter, stride_out, axes.back(), forward,
    tmp.data(), data_out, fct, nthreads);
  }

/* Real-to-real transform in FFTPACK halfcomplex storage.
 * Note: sanity_check is passed data_in==data_out, i.e. in-place operation
 * is validated (not rejected) for this transform.
 */
template<typename T> void r2r_fftpack(const shape_t &shape,
  const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes,
  bool real2hermitian, bool forward, const T *data_in, T *data_out, T fct,
  size_t nthreads=1)
  {
  if (util::prod(shape)==0) return;
  util::sanity_check(shape, stride_in, stride_out, data_in==data_out, axes);
  cndarr<T> ain(data_in, shape, stride_in);
  ndarr<T> aout(data_out, shape, stride_out);
  general_r(ain, aout, axes, real2hermitian, forward, fct, nthreads);
  }

/* Separable Hartley transform over the given axes (in-place allowed, see
 * sanity_check argument). */
template<typename T> void r2r_separable_hartley(const shape_t &shape,
  const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes,
  const T *data_in, T *data_out, T fct, size_t nthreads=1)
  {
  if (util::prod(shape)==0) return;
  util::sanity_check(shape, stride_in, stride_out, data_in==data_out, axes);
  cndarr<T> ain(data_in, shape, stride_in);
  ndarr<T> aout(data_out, shape, stride_out);
  general_hartley(ain, aout, axes, fct, nthreads);
  }

} // namespace detail

// Public API: re-export the implementation names from `detail`.
using detail::FORWARD;
using detail::BACKWARD;
using detail::shape_t;
using detail::stride_t;
using detail::c2c;
using detail::c2r;
using detail::r2c;
using detail::r2r_fftpack;
using detail::r2r_separable_hartley;

} // namespace pocketfft

#undef POCKETFFT_NOINLINE
#undef POCKETFFT_RESTRICT

#endif // POCKETFFT_HDRONLY_H
csr.c
/* Author: Mohammed Ahmed Al Farhan Email: mohammed.farhan@kaust.edu.sa */ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <omp.h> #include "inc/allocator.h" #include "inc/geometry.h" #include "inc/msh/mesh.h" /* c stdlib qsort comparable function */ static inline int comp(const void *restrict a, const void *restrict b) { return (*((uint32_t *) a) - *((uint32_t *) b)); } void m2csr(struct geometry *restrict g) { /* Row pointers */ uint32_t *restrict ia; kcalloc((g->n->sz+1), sizeof(uint32_t), (void *) &ia); uint32_t i; for(i = 0; i < g->e->sz; i++) { ia[g->e->eptr->n0[i]+1]++; ia[g->e->eptr->n1[i]+1]++; } ia[0] = 1; for(i = 1; i <= g->n->sz; i++) { ia[i] += ia[i-1]; ia[i]++; } /* Adjust the IA array to Zero-index (c-style) */ for(i = 0; i <= g->n->sz; i++) ia[i]--; uint32_t *restrict ja; kmalloc(ia[g->n->sz], sizeof(uint32_t), (void *) &ja); /* A temp buffer used to keep tracking of each row elements */ uint32_t *restrict buf; kmalloc(g->n->sz, sizeof(uint32_t), (void *) &buf); /* Column Index of the diagonal elements */ for(i = 0; i < g->n->sz; i++) { ja[ia[i]] = i; // A diagonal element buf[i] = 1; // One element in this row has been added } /* Fill the rest of the array, ordered by RCM and using a * modified version of Breadth-First Search traversing algorithm */ for(i = 0; i < g->e->sz; i++) { uint32_t n0 = g->e->eptr->n0[i]; uint32_t n1 = g->e->eptr->n1[i]; /* Get the element index in the row * The index is basically the row index plus the last element that * has been added in the row. 
*/ uint32_t indx = ia[n0] + buf[n0]; // Get the index buf[n0]++; // Column has been added (one more element in the row) ja[indx] = n1; // Store the node index in its corresponding index /* Do it for the other endpoint */ indx = ia[n1] + buf[n1]; buf[n1]++; ja[indx] = n0; } kfree(buf); // Number of nonzero block per row uint32_t *restrict nnz; kmalloc(g->n->sz, sizeof(uint32_t), (void *) &nnz); /* Sort the each row of a ja array in an increasing order * No we reorder them again to make sure the at each row * we have the node ordered in increasing order plus based on * their degree */ #pragma omp parallel for for(i = 0; i < g->n->sz; i++) { uint32_t jstart = ia[i]; uint32_t jend = ia[i+1]; /* Qsort to sort the JA array */ uint32_t * l = ja + jstart; // Low address uint32_t * h = ja + jend; // High address size_t sz = h - l; qsort(l, sz, sizeof(uint32_t), comp); uint32_t nz = 0; uint32_t j; for(j = jstart; j < jend; j++) nz++; nnz[i] = nz; } g->c->ia = ia; // Starting row indices g->c->ja = ja; // Column indices g->c->nnz = nnz; // Number of nonzero blocks #ifdef __USE_COMPRESSIBLE_FLOW /* Compressible Euler flow */ g->c->bsz = 5; // 5 unknowns per grid point #else /* Incompressible Euler flow */ g->c->bsz = 4; // 4 unknowns per grid point #endif /* Number of the matrix rows | columns */ g->c->sz = g->c->bsz * g->n->sz; }
Transform.h
#ifndef DASH__ALGORITHM__TRANSFORM_H__
#define DASH__ALGORITHM__TRANSFORM_H__

#include <dash/GlobAsyncRef.h>
#include <dash/GlobRef.h>
#include <dash/algorithm/LocalRange.h>
#include <dash/algorithm/Operation.h>
#include <dash/Iterator.h>
#include <dash/internal/Config.h>
#include <dash/util/Trace.h>
#include <dash/dart/if/dart_communication.h>

#ifdef DASH_ENABLE_OPENMP
#include <omp.h>
#endif

namespace dash {

#ifdef DOXYGEN

/**
 * Apply a given function to elements in a range and store the result in
 * another range, beginning at \c out_first.
 * Corresponding to \c MPI_Accumulate, the unary operation is executed
 * atomically on single elements.
 *
 * Precondition: All elements in the input range are contained in a single
 * block so that
 *
 * <tt>
 *   g_out_last == g_out_first + (l_in_last - l_in_first)
 * </tt>
 *
 * Semantics:
 *
 * <tt>
 *   unary_op(in_first[0]), unary_op(in_first[1]), ..., unary_op(in_first[n])
 * </tt>
 *
 * \returns  Output iterator to the element past the last element transformed.
 *
 * \ingroup  DashAlgorithms
 */
template<
  typename ValueType,
  class InputIt,
  class OutputIt,
  class UnaryOperation >
OutputIt transform(
  InputIt        in_first,
  InputIt        in_last,
  OutputIt       out_first,
  UnaryOperation unary_op);

/**
 * Apply a given function to pairs of elements from two ranges and store the
 * result in another range, beginning at \c out_first.
 *
 * Corresponding to \c MPI_Accumulate, the binary operation is executed
 * atomically on single elements.
 *
 * Precondition: All elements in the input range are contained in a single
 * block so that
 *
 *   g_out_last == g_out_first + (l_in_last - l_in_first)
 *
 * Semantics:
 *
 *   binary_op(in_a[0], in_b[0]),
 *   binary_op(in_a[1], in_b[1]),
 *   ...,
 *   binary_op(in_a[n], in_b[n])
 *
 * Example:
 * \code
 *   gptr_diff_t num_transformed_elements =
 *     dash::distance(
 *       dash::transform(in.begin(), in.end(),  // A
 *                       out.begin(),           // B
 *                       out.begin(),           // C = op(A, B)
 *                       dash::plus<int>()),    // op
 *       out.end());
 *
 * \endcode
 *
 * \returns  Output iterator to the element past the last element transformed.
 * \see      dash::reduce
 * \see      DashReduceOperations
 *
 * \tparam   InputIt          Iterator on first (local) input range
 * \tparam   GlobInputIt      Iterator on second (global) input range
 * \tparam   GlobOutputIt     Iterator on global result range
 * \tparam   BinaryOperation  Reduce operation type
 *
 * \ingroup  DashAlgorithms
 */
template<
  class InputIt1,
  class GlobInputIt,
  class GlobOutputIt,
  class BinaryOperation >
GlobOutputIt transform(
  /// Iterator on begin of first local range
  InputIt1        in_a_first,
  /// Iterator after last element of local range
  InputIt1        in_a_last,
  /// Iterator on begin of second local range
  GlobInputIt     in_b_first,
  /// Iterator on first element of global output range
  GlobOutputIt    out_first,
  /// Reduce operation
  BinaryOperation binary_op);

#else

namespace internal {

/**
 * Wrapper of the blocking DART accumulate operation: accumulates `nvalues`
 * elements from `values` into the global memory at `dest` with operation
 * `op`, then flushes remote completion before returning.
 */
template< typename ValueType >
inline dart_ret_t transform_blocking_impl(
  dart_gptr_t      dest,
  ValueType      * values,
  size_t           nvalues,
  dart_operation_t op)
{
  static_assert(dash::dart_datatype<ValueType>::value != DART_TYPE_UNDEFINED,
                "Cannot accumulate unknown type!");
  dart_ret_t result = dart_accumulate(
                        dest,
                        (values),
                        nvalues,
                        dash::dart_datatype<ValueType>::value,
                        op);
  dart_flush(dest);  // blocking: wait for remote completion
  return result;
}

/**
 * Wrapper of the non-blocking DART accumulate operation.
 * Identical to transform_blocking_impl except that only local completion
 * is awaited (dart_flush_local), so `values` may be reused but the remote
 * update may still be in flight.
 */
template< typename ValueType >
dart_ret_t transform_impl(
  dart_gptr_t      dest,
  ValueType      * values,
  size_t           nvalues,
  dart_operation_t op)
{
  static_assert(dash::dart_datatype<ValueType>::value != DART_TYPE_UNDEFINED,
                "Cannot accumulate unknown type!");
  dart_ret_t result = dart_accumulate(
                        dest,
                        (values),
                        nvalues,
                        dash::dart_datatype<ValueType>::value,
                        op);
  dart_flush_local(dest);  // local completion only
  return result;
}

// Tag types used to dispatch internal::transform on the category of the
// first input iterator (local STL iterator vs. global DASH iterator).
struct transform_impl_local_input_it{};
struct transform_impl_glob_input_it{};

/**
 * Transform operation on ranges with identical distribution and start
 * offset.
 * In this case, no communication is needed as all output values can be
 * obtained from input values in local memory:
 *
 * \note
 * This function does not execute the transformation as atomic operation
 * on elements. Use \c dash::transform if concurrent access to elements is
 * possible.
 *
 * <pre>
 *   input a: [ u0 | u1 | u2 | ... ]
 *              op   op   op   ...
 *   input b: [ u0 | u1 | u2 | ... ]
 *              =    =    =    ...
 *   output:  [ u0 | u1 | u2 | ... ]
 * </pre>
 */
template <
  typename ValueType,
  class InputAIt,
  class InputBIt,
  class GlobOutputIt,
  class BinaryOperation>
GlobOutputIt transform_local(
  InputAIt        in_a_first,
  InputAIt        in_a_last,
  InputBIt        in_b_first,
  GlobOutputIt    out_first,
  BinaryOperation binary_op)
{
  DASH_LOG_DEBUG("dash::transform_local()");
  DASH_ASSERT_MSG(in_a_first.pattern() == in_b_first.pattern(),
                  "dash::transform_local: "
                  "distributions of input ranges differ");
  DASH_ASSERT_MSG(in_a_first.pattern() == out_first.pattern(),
                  "dash::transform_local: "
                  "distributions of input- and output ranges differ");
  // Local subrange of input range a:
  auto local_range_a   = dash::local_range(in_a_first, in_a_last);
  ValueType * lbegin_a = local_range_a.begin;
  ValueType * lend_a   = local_range_a.end;
  if (lbegin_a == lend_a) {
    // Local input range is empty, return initial output iterator to indicate
    // that no values have been transformed:
    DASH_LOG_DEBUG("dash::transform_local", "local range empty");
    return out_first;
  }
  // Global offset of first local element:
  auto g_offset_first  = in_a_first.pattern().global(0);
  // Number of elements in global ranges:
  // NOTE(review): distance is taken between the two *input* iterators
  // (in_a_first, in_b_first), not (in_a_first, in_a_last) — confirm this
  // is the intended global range length.
  auto num_gvalues     = dash::distance(in_a_first, in_b_first);
  DASH_LOG_TRACE_VAR("dash::transform_local", num_gvalues);
  // Number of local elements:
  DASH_LOG_TRACE("dash::transform_local",
                 "local elements:", lend_a-lbegin_a);
  // Local subrange of input range b:
  ValueType * lbegin_b   = (in_b_first + g_offset_first).local();
  // Local pointer of initial output element:
  ValueType * lbegin_out = (out_first + g_offset_first).local();
  // Generate output values:
#ifdef DASH_ENABLE_OPENMP
  dash::util::UnitLocality uloc;
  auto n_threads = uloc.num_domain_threads();
  DASH_LOG_DEBUG("dash::transform_local", "thread capacity:", n_threads);
  if (n_threads > 1) {
    // NOTE(review): loop index is int while l_size is a pointer difference;
    // ranges beyond INT_MAX elements would overflow — confirm acceptable.
    auto l_size = lend_a - lbegin_a;
    // TODO: Vectorize.
    // Documentation of Intel MIC intrinsics, see:
    // https://software.intel.com/de-de/node/523533
    // https://software.intel.com/de-de/node/523387
    #pragma omp parallel for num_threads(n_threads) schedule(static)
    for (int i = 0; i < l_size; i++) {
      lbegin_out[i] = binary_op(lbegin_a[i], lbegin_b[i]);
    }
    return out_first + num_gvalues;
  }
#endif
  // No OpenMP or insufficient number of threads for parallelization:
  for (; lbegin_a != lend_a; ++lbegin_a, ++lbegin_b, ++lbegin_out) {
    *lbegin_out = binary_op(*lbegin_a, *lbegin_b);
  }
  // Return out_end iterator past final transformed element;
  return out_first + num_gvalues;
}

/**
 * Specialization of \c dash::transform for global lhs input range:
 * resolves the calling unit's local sub-range of the input and issues one
 * blocking DART accumulate into the output range.
 */
template <
  class InputIt,
  class GlobInputIt,
  class GlobOutputIt,
  class BinaryOperation>
GlobOutputIt transform(
  /// Iterator on begin of first local range
  InputIt         in_a_first,
  /// Iterator after last element of local range
  InputIt         in_a_last,
  /// Iterator on begin of second local range
  GlobInputIt     in_b_first,
  /// Iterator on first element of global output range
  GlobOutputIt    out_first,
  /// Reduce operation
  BinaryOperation binary_op,
  /// Specialization for a global input iterator
  transform_impl_glob_input_it /*unused*/)
{
  using iterator_traits = dash::iterator_traits<InputIt>;
  DASH_LOG_DEBUG("dash::transform(gaf, gal, gbf, goutf, binop)");
  // NOTE(review): in_first / in_last are currently never read below;
  // they are placeholders for the unimplemented C = A+B case.
  auto in_first = in_a_first;
  auto in_last  = in_a_last;
  if (in_b_first == out_first) {
    // Output range is rhs input range: C += A
    // Input is (in_a_first, in_a_last).
  } else {
    DASH_THROW(
      dash::exception::NotImplemented,
      "dash::transform is only implemented for out = op(in,out)");
    // Output range different from rhs input range: C = A+B
    // Input is (in_a_first, in_a_last) + (in_b_first, in_b_last):
    // TODO:
    // in_range.allocate(...);
    // in_first = in_range.begin();
    // in_last  = in_range.end();
  }
  dash::util::Trace trace("transform");
  // Pattern of input ranges a and b, and output range:
  const auto& pattern_in_a = in_a_first.pattern();
  const auto& pattern_in_b = in_b_first.pattern();
  const auto& pattern_out  = out_first.pattern();
  // Fast path guarded by __NON_ATOMIC__: an undefined macro evaluates to 0
  // in #if, so this branch is normally compiled out.
#if __NON_ATOMIC__
  // Fast path: check if transform operation is local-only:
  if (pattern_in_a == pattern_in_b &&
      pattern_in_a == pattern_out) {
    // Identical pattern in all ranges
    if (in_a_first.pos() == in_b_first.pos() &&
        in_a_first.pos() == out_first.pos()) {
      trace.enter_state("local");
      // All units operate on local ranges that have identical distribution:
      // NOTE(review): transform_local lives in dash::internal and the
      // dependent type below lacks `typename`; this path likely does not
      // compile as written — verify before enabling __NON_ATOMIC__.
      auto out_last = dash::transform_local<iterator_traits::value_type>(
                        in_a_first,
                        in_a_last,
                        in_b_first,
                        out_first,
                        binary_op);
      trace.exit_state("local");
      return out_last;
    }
  }
#endif
  // Resolve teams from global iterators:
  dash::Team & team_in_a        = pattern_in_a.team();
  DASH_ASSERT_MSG(
    team_in_a == pattern_in_b.team(),
    "dash::transform: Different teams in input ranges");
  DASH_ASSERT_MSG(
    team_in_a == pattern_out.team(),
    "dash::transform: Different teams in input- and output ranges");
  // Resolve local range from global range:
  auto l_index_range_in_a  = local_index_range(in_a_first, in_a_last);
  DASH_LOG_TRACE_VAR("dash::transform", l_index_range_in_a.begin);
  DASH_LOG_TRACE_VAR("dash::transform", l_index_range_in_a.end);
  // Local range to global offset:
  auto global_offset       = pattern_in_a.global(
                               l_index_range_in_a.begin);
  DASH_LOG_TRACE_VAR("dash::transform", global_offset);
  // Number of elements in local range:
  size_t num_local_elements = l_index_range_in_a.end -
                              l_index_range_in_a.begin;
  DASH_LOG_TRACE_VAR("dash::transform", num_local_elements);
  // Global iterator to dart_gptr_t:
  dart_gptr_t dest_gptr     = (out_first + global_offset).dart_gptr();
  // Native pointer to local sub-range:
  auto l_values             = (in_a_first + global_offset).local();
  // Send accumulate message:
  trace.enter_state("transform_blocking");
  dash::internal::transform_blocking_impl(
      dest_gptr,
      l_values,
      num_local_elements,
      binary_op.dart_operation());
  trace.exit_state("transform_blocking");
  return out_first + global_offset + num_local_elements;
}

/**
 * Specialization of \c dash::transform for a local (STL) lhs input range:
 * accumulates the local values directly into the global output location.
 */
template <
  class InputIt,
  class GlobInputIt,
  class GlobOutputIt,
  class BinaryOperation>
GlobOutputIt transform(
  /// Iterator on begin of first local range
  InputIt         in_a_first,
  /// Iterator after last element of local range
  InputIt         in_a_last,
  /// Iterator on begin of second local range
  GlobInputIt     in_b_first,
  /// Iterator on first element of global output range
  GlobOutputIt    out_first,
  /// Reduce operation
  BinaryOperation binary_op,
  transform_impl_local_input_it /*unused*/)
{
  DASH_LOG_DEBUG("dash::transform(af, al, bf, outf, binop)");
  // Output range different from rhs input range is not supported yet
  auto in_first = in_a_first;
  auto in_last  = in_a_last;
  using value_type = typename dash::iterator_traits<InputIt>::value_type;
  std::vector<value_type> in_range;
  if (in_b_first == out_first) {
    // Output range is rhs input range: C += A
    // Input is (in_a_first, in_a_last).
  } else {
    // Output range different from rhs input range: C = A+B
    // Input is (in_a_first, in_a_last) + (in_b_first, in_b_last):
    std::transform(
        in_a_first,
        in_a_last,
        in_b_first,
        std::back_inserter(in_range),
        binary_op);
    // NOTE(review): in_range.data() is a raw pointer assigned to a
    // variable of type InputIt — this only compiles when InputIt is a
    // pointer type; confirm the intended iterator categories.
    in_first = in_range.data();
    in_last  = in_first + in_range.size();
  }
  dash::util::Trace trace("transform");
  // Resolve local range from global range:
  // Number of elements in local range:
  size_t num_local_elements = std::distance(in_first, in_last);
  // Global iterator to dart_gptr_t:
  dart_gptr_t dest_gptr     = out_first.dart_gptr();
  // Send accumulate message:
  trace.enter_state("transform_blocking");
  dash::internal::transform_blocking_impl(
      dest_gptr,
      in_first,
      num_local_elements,
      binary_op.dart_operation());
  trace.exit_state("transform_blocking");
  // The position past the last element transformed in global element space
  // cannot be resolved from the size of the local range if the local range
  // spans over more than one block. Otherwise, the difference of two global
  // iterators is not well-defined. The corresponding invariant is:
  //   g_out_last == g_out_first + (l_in_last - l_in_first)
  // Example:
  //   unit:            0       1       0
  //   local offset:  | 0 1 2 | 0 1 2 | 3 4 5 | ...
  //   global offset: | 0 1 2   3 4 5   6 7 8 ...
  //   range:          [- - - - -]
  // When iterating in local memory range [0,5[ of unit 0, the position of
  // the global iterator to return is 8 != 5
  // For ranges over block borders, we would have to resolve the global
  // position past the last element transformed from the iterator's pattern
  // (see dash::PatternIterator).
  return out_first + num_local_elements;
}

} // namespace internal

/**
 * Public entry point: dispatches to the global- or local-input
 * implementation depending on whether InputIt is a DASH global iterator.
 */
template <
  class InputIt,
  class GlobInputIt,
  class GlobOutputIt,
  class BinaryOperation>
GlobOutputIt transform(
  InputIt         in_a_first,
  InputIt         in_a_last,
  GlobInputIt     in_b_first,
  GlobOutputIt    out_first,
  BinaryOperation binary_op)
{
  using InputIt_traits_t     = dash::iterator_traits<InputIt>;
  using InputIt_is_global_t  = typename InputIt_traits_t::is_global_iterator;
  using GlobInputIt_traits_t = dash::iterator_traits<GlobInputIt>;
  using GlobOutputIt_traits_t = dash::iterator_traits<GlobOutputIt>;

  // currently we support only two cases: the range [in_a_first, in_a_last]
  // may be defined by global or non-global iterators (i.e., any STL
  // iterator). However, in_b_first and out_first have to be global
  // iterators.
  static_assert(
      GlobInputIt_traits_t::is_global_iterator::value,
      "in_b_first must be a global iterator");
  static_assert(
      GlobOutputIt_traits_t::is_global_iterator::value,
      "out_first must be a global iterator");

  return internal::transform(
      in_a_first,
      in_a_last,
      in_b_first,
      out_first,
      binary_op,
      typename std::conditional<
          InputIt_is_global_t::value,
          internal::transform_impl_glob_input_it,
          internal::transform_impl_local_input_it>::type());
}

/**
 * Specialization of \c dash::transform as non-blocking operation.
 * Currently a stub: always throws NotImplemented.
 *
 * \tparam  InputIt          Iterator on first (typically local) input range
 * \tparam  GlobInputIt      Iterator on second (typically global) input range
 * \tparam  GlobOutputIt     Iterator on global result range
 * \tparam  BinaryOperation  Reduce operation type
 */
template <
  typename ValueType,
  class InputIt,
  class GlobInputIt,
  class BinaryOperation>
GlobAsyncRef<ValueType> transform(
  InputIt                  /*in_a_first*/,
  InputIt                  /*in_a_last*/,
  GlobInputIt              /*in_b_first*/,
  GlobAsyncRef<ValueType>  /*out_first*/,
  BinaryOperation          /*binary_op*/ = dash::plus<ValueType>())
{
  DASH_THROW(
    dash::exception::NotImplemented,
    "Async variant of dash::transform is not implemented");
}

#endif

} // namespace dash

#endif // DASH__ALGORITHM__TRANSFORM_H__
activate.c
#include "lib.h"
#include <math.h>
#include <stdint.h>
#include <stdlib.h>

/* ReLU forward pass: out = in for positive inputs, 0 otherwise.
 * Written branch-free as a multiply by the comparison result, which also
 * propagates NaN inputs unchanged. */
void NEURALOPS_SYMBOL(rect_fwd)(
    size_t batch_sz,
    size_t dim,
    const float *in_buf,
    float *out_buf)
{
  const size_t count = batch_sz * dim;
  #pragma omp parallel for
  for (size_t idx = 0; idx < count; idx++) {
    const float v = in_buf[idx];
    out_buf[idx] = v * (v > 0.0f);
  }
}

/* ReLU backward pass: pass the upstream gradient through wherever the
 * forward activation was positive, zero elsewhere. */
void NEURALOPS_SYMBOL(rect_bwd)(
    size_t batch_sz,
    size_t dim,
    const float *out_buf,
    const float *out_grad,
    float *in_grad)
{
  const size_t count = batch_sz * dim;
  #pragma omp parallel for
  for (size_t idx = 0; idx < count; idx++) {
    const float activation = out_buf[idx];
    const float upstream = out_grad[idx];
    in_grad[idx] = upstream * (activation > 0.0f);
  }
}

/* Logistic (sigmoid) forward pass: out = 1 / (1 + exp(-in)). */
void NEURALOPS_SYMBOL(logistic_fwd)(
    size_t batch_sz,
    size_t dim,
    const float *in_buf,
    float *out_buf)
{
  const size_t count = batch_sz * dim;
  #pragma omp parallel for
  for (size_t idx = 0; idx < count; idx++) {
    const float v = in_buf[idx];
    out_buf[idx] = 1.0f / (1.0f + expf(-v));
  }
}

/* Logistic backward pass: d(sigmoid)/dx = y * (1 - y), scaled by the
 * upstream gradient. */
void NEURALOPS_SYMBOL(logistic_bwd)(
    size_t batch_sz,
    size_t dim,
    const float *out_buf,
    const float *out_grad,
    float *in_grad)
{
  const size_t count = batch_sz * dim;
  #pragma omp parallel for
  for (size_t idx = 0; idx < count; idx++) {
    const float activation = out_buf[idx];
    const float upstream = out_grad[idx];
    in_grad[idx] = activation * (1.0f - activation) * upstream;
  }
}
_image.c
/* Generated by Cython 0.23.3 */ /* BEGIN: Cython Metadata { "distutils": {} } END: Cython Metadata */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000) #error Cython requires Python 2.6+ or Python 3.2+. #else #define CYTHON_ABI "0_23_3" #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if !defined(CYTHON_USE_PYLONG_INTERNALS) && CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x02070000 #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType 
PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define 
__Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #elif CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #define __Pyx_void_to_None(void_result) (void_result, Py_INCREF(Py_None), Py_None) #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define 
__Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__fuel__transformers___image #define __PYX_HAVE_API__fuel__transformers___image #include "pythread.h" #include "string.h" #include "stdlib.h" #include "stdio.h" #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) #if 
defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (_MSC_VER) && defined (_M_X64) #define __Pyx_sst_abs(value) _abs64(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? -value : value) #endif static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) 
__Pyx_PyUnicode_FromString((const char*)s)
#if PY_MAJOR_VERSION < 3
/* Python 2 has no Py_UNICODE_strlen; count Py_UNICODE code units up to the
 * terminating zero (which is excluded from the returned length). */
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
{
    const Py_UNICODE *u_end = u;
    while (*u_end++) ;
    return (size_t)(u_end - u - 1);
}
#else
#define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen
#endif
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
/* Evaluates to obj with one extra reference now owned by the caller. */
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
#define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False))
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_COMPILING_IN_CPYTHON
/* Fast path: read the C double directly when x is exactly a float. */
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ?
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
/* Set once at module init: nonzero when sys.getdefaultencoding() is not
 * plain "ascii" (in which case it has been verified to be an ASCII
 * superset by the helper below). */
static int __Pyx_sys_getdefaultencoding_not_ascii;
/* Module-init helper: query sys.getdefaultencoding() and verify that it
 * round-trips all 128 ASCII code points, since this module was compiled
 * with c_string_encoding=ascii.  Returns 0 on success, -1 with a Python
 * exception set on failure. */
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    PyObject* ascii_chars_u = NULL;
    PyObject* ascii_chars_b = NULL;
    const char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);
    if (!default_encoding) goto bad;
    /* Pointer into default_encoding's internal buffer; valid while the
     * reference is held. */
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    if (strcmp(default_encoding_c, "ascii") == 0) {
        __Pyx_sys_getdefaultencoding_not_ascii = 0;
    } else {
        char ascii_chars[128];
        int c;
        for (c = 0; c < 128; c++) {
            ascii_chars[c] = c;
        }
        __Pyx_sys_getdefaultencoding_not_ascii = 1;
        /* Decode bytes 0..127 as ASCII, re-encode with the default
         * encoding, and require the identical byte sequence back. */
        ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
        if (!ascii_chars_u) goto bad;
        ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
        if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
            PyErr_Format(
                PyExc_ValueError,
                "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
                default_encoding_c);
            goto bad;
        }
        Py_DECREF(ascii_chars_u);
        Py_DECREF(ascii_chars_b);
    }
    Py_DECREF(default_encoding);
    return 0;
bad:
    /* Single cleanup path: XDECREF tolerates the NULLs from early exits. */
    Py_XDECREF(default_encoding);
    Py_XDECREF(ascii_chars_u);
    Py_XDECREF(ascii_chars_b);
    return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if
__PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; static const char *__pyx_f[] = { "fuel/transformers/_image.pyx", "stringsource", }; struct __pyx_memoryview_obj; typedef struct { struct __pyx_memoryview_obj *memview; char *data; Py_ssize_t shape[8]; Py_ssize_t strides[8]; Py_ssize_t suboffsets[8]; } __Pyx_memviewslice; #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { 
__Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; #include <pythread.h> #ifndef CYTHON_ATOMICS #define CYTHON_ATOMICS 1 #endif #define __pyx_atomic_int_type int #if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ !defined(__i386__) #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) #ifdef __PYX_DEBUG_ATOMICS #warning "Using GNU atomics" #endif #elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 #include <Windows.h> #undef __pyx_atomic_int_type #define __pyx_atomic_int_type LONG #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #pragma message ("Using MSVC atomics") #endif #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #warning "Using Intel atomics" #endif #else #undef CYTHON_ATOMICS #define CYTHON_ATOMICS 0 #ifdef __PYX_DEBUG_ATOMICS #warning "Not using atomics" #endif #endif typedef volatile __pyx_atomic_int_type __pyx_atomic_int; #if CYTHON_ATOMICS #define __pyx_add_acquisition_count(memview)\ __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #else 
#define __pyx_add_acquisition_count(memview)\ __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #endif /* "fuel/transformers/_image.pyx":5 * * * ctypedef long Py_intptr_t # <<<<<<<<<<<<<< * * ctypedef fused image_dtype: */ typedef long __pyx_t_4fuel_12transformers_6_image_Py_intptr_t; /*--- Type declarations ---*/ struct __pyx_array_obj; struct __pyx_MemviewEnum_obj; struct __pyx_memoryview_obj; struct __pyx_memoryviewslice_obj; /* "View.MemoryView":101 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":271 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":304 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":923 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject 
*(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":304 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":923 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save 
= PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) #if CYTHON_COMPILING_IN_CPYTHON static 
CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif static PyObject *__Pyx_GetBuiltinName(PyObject *name); static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb); static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb); static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); static CYTHON_INLINE int __Pyx_PyDict_ContainsTF(PyObject* item, PyObject* dict, int eq) { int result = PyDict_Contains(dict, item); return unlikely(result < 0) ? 
result : (result == (eq == Py_EQ)); } #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { PyObject *value; value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { if (!PyErr_Occurred()) { PyObject* args = PyTuple_Pack(1, key); if (likely(args)) PyErr_SetObject(PyExc_KeyError, args); Py_XDECREF(args); } return NULL; } Py_INCREF(value); return value; } #else #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) #endif #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject*); #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyObject_Ord(c)\ (likely(PyUnicode_Check(c)) ? (long)__Pyx_PyUnicode_AsPy_UCS4(c) : __Pyx__PyObject_Ord(c)) #else #define __Pyx_PyObject_Ord(c) __Pyx__PyObject_Ord(c) #endif static long __Pyx__PyObject_Ord(PyObject* c); #define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) :\ (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) :\ __Pyx_SetItemInt_Generic(o, to_py_func(i), v))) static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v); static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list, int wraparound, int boundscheck); static CYTHON_INLINE int __Pyx_IterFinish(void); #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); #else #define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) #endif static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); static CYTHON_INLINE int __Pyx_unpack_tuple2(PyObject* tuple, PyObject** value1, PyObject** value2, int is_tuple, int has_known_size, int decref_tuple); static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* dict, int is_dict, PyObject* method_name, Py_ssize_t* p_orig_length, int* p_is_dict); static CYTHON_INLINE int __Pyx_dict_iter_next(PyObject* dict_or_iter, Py_ssize_t orig_length, Py_ssize_t* ppos, PyObject** pkey, PyObject** pvalue, PyObject** pitem, int is_dict); #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 
1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname); static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); #include <string.h> static CYTHON_INLINE int 
__Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *get_memview(PyObject *__pyx_v_self); /*proto*/ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self); /*proto*/ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif static PyObject *__pyx_memoryview_get_strides(PyObject 
*__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self); /*proto*/ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace)\ (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } static CYTHON_INLINE long __Pyx_div_long(long, long); static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self); /*proto*/ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback, int nogil); static int __Pyx_SetVtable(PyObject *dict, void *vtable); static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type); #define __Pyx_CyFunction_USED 1 #include <structmember.h> #define __Pyx_CYFUNCTION_STATICMETHOD 0x01 #define __Pyx_CYFUNCTION_CLASSMETHOD 0x02 #define __Pyx_CYFUNCTION_CCLASS 0x04 #define __Pyx_CyFunction_GetClosure(f)\ (((__pyx_CyFunctionObject *) (f))->func_closure) #define __Pyx_CyFunction_GetClassObj(f)\ (((__pyx_CyFunctionObject *) (f))->func_classobj) #define __Pyx_CyFunction_Defaults(type, f)\ ((type *)(((__pyx_CyFunctionObject *) (f))->defaults)) #define __Pyx_CyFunction_SetDefaultsGetter(f, g)\ ((__pyx_CyFunctionObject *) (f))->defaults_getter = (g) typedef struct { PyCFunctionObject func; #if 
PY_VERSION_HEX < 0x030500A0 PyObject *func_weakreflist; #endif PyObject *func_dict; PyObject *func_name; PyObject *func_qualname; PyObject *func_doc; PyObject *func_globals; PyObject *func_code; PyObject *func_closure; PyObject *func_classobj; void *defaults; int defaults_pyobjects; int flags; PyObject *defaults_tuple; PyObject *defaults_kwdict; PyObject *(*defaults_getter)(PyObject *); PyObject *func_annotations; } __pyx_CyFunctionObject; static PyTypeObject *__pyx_CyFunctionType = 0; #define __Pyx_CyFunction_NewEx(ml, flags, qualname, self, module, globals, code)\ __Pyx_CyFunction_New(__pyx_CyFunctionType, ml, flags, qualname, self, module, globals, code) static PyObject *__Pyx_CyFunction_New(PyTypeObject *, PyMethodDef *ml, int flags, PyObject* qualname, PyObject *self, PyObject *module, PyObject *globals, PyObject* code); static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m, size_t size, int pyobjects); static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m, PyObject *tuple); static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m, PyObject *dict); static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m, PyObject *dict); static int __pyx_CyFunction_init(void); typedef struct { __pyx_CyFunctionObject func; PyObject *__signatures__; PyObject *type; PyObject *self; } __pyx_FusedFunctionObject; #define __pyx_FusedFunction_NewEx(ml, flags, qualname, self, module, globals, code)\ __pyx_FusedFunction_New(__pyx_FusedFunctionType, ml, flags, qualname, self, module, globals, code) static PyObject *__pyx_FusedFunction_New(PyTypeObject *type, PyMethodDef *ml, int flags, PyObject *qualname, PyObject *self, PyObject *module, PyObject *globals, PyObject *code); static int __pyx_FusedFunction_clear(__pyx_FusedFunctionObject *self); static PyTypeObject *__pyx_FusedFunctionType = NULL; static int __pyx_FusedFunction_init(void); #define __Pyx_FusedFunction_USED typedef struct { int code_line; PyCodeObject* 
code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_float(PyObject *); static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_double(PyObject *); static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_unsigned_char(PyObject *); typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_long(PyObject *); static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); static CYTHON_INLINE int __Pyx_BytesContains(PyObject* 
bytes, char character); static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice *mvs, char order, int ndim); static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); static int __Pyx_check_binary_version(void); static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char 
*__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'cython.view' */ /* Module declarations from 'cython' */ /* Module declarations from 'fuel.transformers._image' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static PyObject *__pyx_fuse_0__pyx_f_4fuel_12transformers_6_image_window_batch_bchw(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/ static PyObject *__pyx_fuse_1__pyx_f_4fuel_12transformers_6_image_window_batch_bchw(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/ static PyObject *__pyx_fuse_2__pyx_f_4fuel_12transformers_6_image_window_batch_bchw(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, 
Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static 
void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_unsigned_char = { "unsigned char", NULL, sizeof(unsigned char), { 0 }, 0, IS_UNSIGNED(unsigned char) ? 'U' : 'I', IS_UNSIGNED(unsigned char), 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_long = { "long", NULL, sizeof(long), { 0 }, 0, IS_UNSIGNED(long) ? 'U' : 'I', IS_UNSIGNED(long), 0 }; #define __Pyx_MODULE_NAME "fuel.transformers._image" int __pyx_module_is_main_fuel__transformers___image = 0; /* Implementation of 'fuel.transformers._image' */ static PyObject *__pyx_builtin_ImportError; static PyObject *__pyx_builtin_AttributeError; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_zip; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static char __pyx_k_[] = "()"; static char __pyx_k_O[] = "O"; static char __pyx_k_c[] = "c"; static char __pyx_k__3[] = "|"; static char __pyx_k_id[] = "id"; static char __pyx_k_obj[] = "obj"; static char __pyx_k_out[] = "out"; static char __pyx_k_zip[] = "zip"; static char __pyx_k_args[] = "args"; static char __pyx_k_base[] = "base"; static char __pyx_k_kind[] = "kind"; static char __pyx_k_main[] = 
"__main__"; static char __pyx_k_mode[] = "mode"; static char __pyx_k_name[] = "name"; static char __pyx_k_ndim[] = "ndim"; static char __pyx_k_pack[] = "pack"; static char __pyx_k_size[] = "size"; static char __pyx_k_step[] = "step"; static char __pyx_k_stop[] = "stop"; static char __pyx_k_test[] = "__test__"; static char __pyx_k_ASCII[] = "ASCII"; static char __pyx_k_batch[] = "batch"; static char __pyx_k_class[] = "__class__"; static char __pyx_k_dtype[] = "dtype"; static char __pyx_k_error[] = "error"; static char __pyx_k_flags[] = "flags"; static char __pyx_k_float[] = "float"; static char __pyx_k_numpy[] = "numpy"; static char __pyx_k_range[] = "range"; static char __pyx_k_shape[] = "shape"; static char __pyx_k_split[] = "split"; static char __pyx_k_start[] = "start"; static char __pyx_k_strip[] = "strip"; static char __pyx_k_double[] = "double"; static char __pyx_k_encode[] = "encode"; static char __pyx_k_format[] = "format"; static char __pyx_k_import[] = "__import__"; static char __pyx_k_kwargs[] = "kwargs"; static char __pyx_k_name_2[] = "__name__"; static char __pyx_k_struct[] = "struct"; static char __pyx_k_unpack[] = "unpack"; static char __pyx_k_fortran[] = "fortran"; static char __pyx_k_memview[] = "memview"; static char __pyx_k_ndarray[] = "ndarray"; static char __pyx_k_Ellipsis[] = "Ellipsis"; static char __pyx_k_defaults[] = "defaults"; static char __pyx_k_itemsize[] = "itemsize"; static char __pyx_k_TypeError[] = "TypeError"; static char __pyx_k_enumerate[] = "enumerate"; static char __pyx_k_IndexError[] = "IndexError"; static char __pyx_k_ValueError[] = "ValueError"; static char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static char __pyx_k_signatures[] = "signatures"; static char __pyx_k_ImportError[] = "ImportError"; static char __pyx_k_MemoryError[] = "MemoryError"; static char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static char __pyx_k_unsigned_char[] = "unsigned char"; static char __pyx_k_width_offsets[] = "width_offsets"; static char 
__pyx_k_AttributeError[] = "AttributeError"; static char __pyx_k_height_offsets[] = "height_offsets"; static char __pyx_k_allocate_buffer[] = "allocate_buffer"; static char __pyx_k_dtype_is_object[] = "dtype_is_object"; static char __pyx_k_window_batch_bchw[] = "window_batch_bchw"; static char __pyx_k_strided_and_direct[] = "<strided and direct>"; static char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static char __pyx_k_fuel_transformers__image[] = "fuel.transformers._image"; static char __pyx_k_getbuffer_obj_view_flags[] = "getbuffer(obj, view, flags)"; static char __pyx_k_Dimension_d_is_not_direct[] = "Dimension %d is not direct"; static char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static char __pyx_k_Index_out_of_bounds_axis_d[] = "Index out of bounds (axis %d)"; static char __pyx_k_No_matching_signature_found[] = "No matching signature found"; static char __pyx_k_Step_may_not_be_zero_axis_d[] = "Step may not be zero (axis %d)"; static char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static char __pyx_k_pyx_fuse_0window_batch_bchw[] = "__pyx_fuse_0window_batch_bchw"; static char __pyx_k_pyx_fuse_1window_batch_bchw[] = "__pyx_fuse_1window_batch_bchw"; static char __pyx_k_pyx_fuse_2window_batch_bchw[] = "__pyx_fuse_2window_batch_bchw"; static char __pyx_k_Expected_at_least_d_arguments[] = "Expected at least %d arguments"; static char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static char 
__pyx_k_Users_bartvm_fuel_fuel_transfor[] = "/Users/bartvm/fuel/fuel/transformers/_image.pyx"; static char __pyx_k_All_dimensions_preceding_dimensi[] = "All dimensions preceding dimension %d must be indexed and not sliced"; static char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static char __pyx_k_Cannot_transpose_memoryview_with[] = "Cannot transpose memoryview with indirect dimensions"; static char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static char __pyx_k_Function_call_with_ambiguous_arg[] = "Function call with ambiguous argument types"; static char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_kp_s_; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_n_s_AttributeError; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_kp_s_Expected_at_least_d_arguments; static PyObject *__pyx_kp_s_Function_call_with_ambiguous_arg; static PyObject *__pyx_n_s_ImportError; static PyObject *__pyx_n_s_IndexError; static PyObject 
*__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_kp_s_No_matching_signature_found; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_kp_s_Users_bartvm_fuel_fuel_transfor; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_kp_s__3; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_args; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_batch; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_class; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_defaults; static PyObject *__pyx_n_s_double; static PyObject *__pyx_n_s_dtype; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_float; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_fuel_transformers__image; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_height_offsets; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_kind; static PyObject *__pyx_n_s_kwargs; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_name; static PyObject 
*__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndarray; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_out; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_pyx_fuse_0window_batch_bchw; static PyObject *__pyx_n_s_pyx_fuse_1window_batch_bchw; static PyObject *__pyx_n_s_pyx_fuse_2window_batch_bchw; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_signatures; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_split; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_n_s_strip; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_test; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_kp_s_unsigned_char; static PyObject *__pyx_n_s_width_offsets; static PyObject *__pyx_n_s_window_batch_bchw; static PyObject *__pyx_n_s_zip; static PyObject *__pyx_pf_4fuel_12transformers_6_image_window_batch_bchw(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_signatures, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs, CYTHON_UNUSED PyObject *__pyx_v_defaults); /* proto */ static PyObject *__pyx_pf_4fuel_12transformers_6_image_2__pyx_fuse_0window_batch_bchw(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_batch, __Pyx_memviewslice __pyx_v_height_offsets, __Pyx_memviewslice __pyx_v_width_offsets, __Pyx_memviewslice __pyx_v_out); /* proto */ static PyObject *__pyx_pf_4fuel_12transformers_6_image_4__pyx_fuse_1window_batch_bchw(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice 
__pyx_v_batch, __Pyx_memviewslice __pyx_v_height_offsets, __Pyx_memviewslice __pyx_v_width_offsets, __Pyx_memviewslice __pyx_v_out); /* proto */ static PyObject *__pyx_pf_4fuel_12transformers_6_image_6__pyx_fuse_2window_batch_bchw(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_batch, __Pyx_memviewslice __pyx_v_height_offsets, __Pyx_memviewslice __pyx_v_width_offsets, __Pyx_memviewslice __pyx_v_out); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj 
*__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static 
Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject 
*__pyx_tuple__9; static PyObject *__pyx_slice__16; static PyObject *__pyx_slice__17; static PyObject *__pyx_slice__18; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__15; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__20; static PyObject *__pyx_tuple__22; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__24; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__26; static PyObject *__pyx_codeobj__21; /* "fuel/transformers/_image.pyx":15 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef window_batch_bchw(image_dtype[:, :, :, :] batch, # <<<<<<<<<<<<<< * long[:] height_offsets, long[:] width_offsets, * image_dtype[:, :, :, :] out): */ /* Python wrapper */ static PyObject *__pyx_pw_4fuel_12transformers_6_image_1window_batch_bchw(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4fuel_12transformers_6_image_window_batch_bchw[] = "window_batch_bchw(batch, window_height, window_width,\n height_offsets, width_offsets, out)\n\n Perform windowing on a (batch, channels, height, width) image tensor.\n\n Parameters\n ----------\n batch : memoryview, 4-dimensional\n A 4-d tensor containing a batch of images in the expected\n format above.\n height_offsets : memoryview, integer, 1-dimensional\n An array of offsets for the height dimension of each image.\n Assumed that batch.shape[0] <= height_offsets.shape[0].\n width_offsets : memoryview, integer, 1-dimensional\n An array of offsets for the width dimension of each image.\n Assumed that batch.shape[0] <= width_offsets.shape[0].\n out : memoryview\n The array to which to write output. 
It is assumed that\n `out.shape[2] + height_offsets[i] <= batch.shape[2]` and\n `out.shape[3] + width_offsets[i] <= batch.shape[3]`, for\n all values of `i`.\n\n Notes\n -----\n Operates on a batch in parallel via OpenMP. Set `OMP_NUM_THREADS`\n to benefit from this parallelism.\n\n This is a low-level utility that, for the sake of speed, does\n not check its input for validity. Some amount of protection is\n provided by Cython memoryview objects.\n\n "; static PyMethodDef __pyx_mdef_4fuel_12transformers_6_image_1window_batch_bchw = {"window_batch_bchw", (PyCFunction)__pyx_pw_4fuel_12transformers_6_image_1window_batch_bchw, METH_VARARGS|METH_KEYWORDS, __pyx_doc_4fuel_12transformers_6_image_window_batch_bchw}; static PyObject *__pyx_pw_4fuel_12transformers_6_image_1window_batch_bchw(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_signatures = 0; PyObject *__pyx_v_args = 0; PyObject *__pyx_v_kwargs = 0; CYTHON_UNUSED PyObject *__pyx_v_defaults = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_fused_cpdef (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_signatures,&__pyx_n_s_args,&__pyx_n_s_kwargs,&__pyx_n_s_defaults,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_signatures)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, 
__pyx_n_s_args)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_kwargs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_defaults)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_fused_cpdef") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_signatures = values[0]; __pyx_v_args = values[1]; __pyx_v_kwargs = values[2]; __pyx_v_defaults = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fuel.transformers._image.__pyx_fused_cpdef", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4fuel_12transformers_6_image_window_batch_bchw(__pyx_self, __pyx_v_signatures, __pyx_v_args, __pyx_v_kwargs, __pyx_v_defaults); /* function exit code */ 
__Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4fuel_12transformers_6_image_window_batch_bchw(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_signatures, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs, CYTHON_UNUSED PyObject *__pyx_v_defaults) { PyObject *__pyx_v_dest_sig = NULL; PyTypeObject *__pyx_v_ndarray = 0; PyObject *__pyx_v_numpy = NULL; __Pyx_memviewslice __pyx_v_memslice; Py_ssize_t __pyx_v_itemsize; int __pyx_v_dtype_signed; char __pyx_v_kind; int __pyx_v_unsigned_char_is_signed; PyObject *__pyx_v_arg = NULL; PyObject *__pyx_v_dtype = NULL; PyObject *__pyx_v_arg_base = NULL; PyObject *__pyx_v_candidates = NULL; PyObject *__pyx_v_sig = NULL; int __pyx_v_match_found; PyObject *__pyx_v_src_type = NULL; PyObject *__pyx_v_dst_type = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; long __pyx_t_11; Py_ssize_t __pyx_t_12; int __pyx_t_13; Py_ssize_t __pyx_t_14; PyObject *(*__pyx_t_15)(PyObject *); PyObject *__pyx_t_16 = NULL; PyObject *__pyx_t_17 = NULL; PyObject *__pyx_t_18 = NULL; PyObject *(*__pyx_t_19)(PyObject *); int __pyx_t_20; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("window_batch_bchw", 0); __Pyx_INCREF(__pyx_v_kwargs); __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyList_SET_ITEM(__pyx_t_1, 0, Py_None); __pyx_v_dest_sig = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = (__pyx_v_kwargs == Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF_SET(__pyx_v_kwargs, __pyx_t_1); __pyx_t_1 = 0; } { __Pyx_ExceptionSave(&__pyx_t_4, &__pyx_t_5, &__pyx_t_6); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_6); /*try:*/ { __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L4_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_numpy = __pyx_t_1; __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_numpy, __pyx_n_s_ndarray); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L4_error;} __Pyx_GOTREF(__pyx_t_1); if (!(likely(PyType_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "type", Py_TYPE(__pyx_t_1)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L4_error;} __pyx_v_ndarray = ((PyTypeObject*)__pyx_t_1); __pyx_t_1 = 0; } __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L11_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_7 = PyErr_ExceptionMatches(__pyx_builtin_ImportError) || PyErr_ExceptionMatches(__pyx_builtin_AttributeError) || PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_7) { __Pyx_AddTraceback("fuel.transformers._image.__pyx_fused_cpdef", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_8, &__pyx_t_9) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L6_except_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(Py_None); __Pyx_XDECREF_SET(__pyx_v_ndarray, ((PyTypeObject*)Py_None)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_8); 
__pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L5_exception_handled; } goto __pyx_L6_except_error; __pyx_L6_except_error:; __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_6); __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6); goto __pyx_L1_error; __pyx_L5_exception_handled:; __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_6); __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6); __pyx_L11_try_end:; } __pyx_v_itemsize = -1L; __pyx_v_unsigned_char_is_signed = (((unsigned char)-1L) < 0); if (unlikely(__pyx_v_args == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_10 = PyTuple_GET_SIZE(((PyObject*)__pyx_v_args)); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_3 = ((0 < __pyx_t_10) != 0); if (__pyx_t_3) { if (unlikely(__pyx_v_args == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_9 = PyTuple_GET_ITEM(((PyObject*)__pyx_v_args), 0); __Pyx_INCREF(__pyx_t_9); __pyx_v_arg = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L14; } if (unlikely(__pyx_v_kwargs == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_3 = (__Pyx_PyDict_ContainsTF(__pyx_n_s_batch, ((PyObject*)__pyx_v_kwargs), Py_EQ)); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_2 = (__pyx_t_3 != 0); if (__pyx_t_2) { if (unlikely(__pyx_v_kwargs == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_9 = __Pyx_PyDict_GetItem(((PyObject*)__pyx_v_kwargs), __pyx_n_s_batch); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __Pyx_GOTREF(__pyx_t_9); __pyx_v_arg = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L14; } /*else*/ { if (unlikely(__pyx_v_args == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_10 = PyTuple_GET_SIZE(((PyObject*)__pyx_v_args)); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_9 = PyInt_FromSsize_t(__pyx_t_10); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_8 = __Pyx_PyString_Format(__pyx_kp_s_Expected_at_least_d_arguments, __pyx_t_9); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L14:; while (1) { 
__pyx_t_2 = (__pyx_v_ndarray != ((PyTypeObject*)Py_None)); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_arg, __pyx_v_ndarray); __pyx_t_2 = (__pyx_t_3 != 0); if (__pyx_t_2) { __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_dtype); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __pyx_v_dtype = __pyx_t_8; __pyx_t_8 = 0; goto __pyx_L18; } __pyx_t_2 = (__pyx_memoryview_check(__pyx_v_arg) != 0); if (__pyx_t_2) { __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_base); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __pyx_v_arg_base = __pyx_t_8; __pyx_t_8 = 0; __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_arg_base, __pyx_v_ndarray); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg_base, __pyx_n_s_dtype); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __pyx_v_dtype = __pyx_t_8; __pyx_t_8 = 0; goto __pyx_L19; } /*else*/ { __Pyx_INCREF(Py_None); __pyx_v_dtype = Py_None; } __pyx_L19:; goto __pyx_L18; } /*else*/ { __Pyx_INCREF(Py_None); __pyx_v_dtype = Py_None; } __pyx_L18:; __pyx_v_itemsize = -1L; __pyx_t_3 = (__pyx_v_dtype != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); if (__pyx_t_2) { __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_dtype, __pyx_n_s_itemsize); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_v_itemsize = __pyx_t_10; 
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_dtype, __pyx_n_s_kind); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __pyx_t_11 = __Pyx_PyObject_Ord(__pyx_t_8); if (unlikely(__pyx_t_11 == (long)(Py_UCS4)-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_v_kind = __pyx_t_11; __pyx_v_dtype_signed = (__pyx_v_kind == 'i'); switch (__pyx_v_kind) { case 'i': case 'u': __pyx_t_3 = (((sizeof(unsigned char)) == __pyx_v_itemsize) != 0); if (__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L22_bool_binop_done; } __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_ndim); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_3 = ((((Py_ssize_t)__pyx_t_10) == 4) != 0); if (__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L22_bool_binop_done; } __pyx_t_3 = ((!((__pyx_v_unsigned_char_is_signed ^ __pyx_v_dtype_signed) != 0)) != 0); __pyx_t_2 = __pyx_t_3; __pyx_L22_bool_binop_done:; if (__pyx_t_2) { if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_kp_s_unsigned_char, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L16_break; } break; case 'f': __pyx_t_3 = (((sizeof(float)) == __pyx_v_itemsize) != 0); if (__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L26_bool_binop_done; } __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_ndim); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_3 = ((((Py_ssize_t)__pyx_t_10) == 4) != 0); __pyx_t_2 = __pyx_t_3; __pyx_L26_bool_binop_done:; if (__pyx_t_2) { if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_float, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L16_break; } __pyx_t_3 = (((sizeof(double)) == __pyx_v_itemsize) != 0); if (__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L29_bool_binop_done; } __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_ndim); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_3 = ((((Py_ssize_t)__pyx_t_10) == 4) != 0); __pyx_t_2 = __pyx_t_3; __pyx_L29_bool_binop_done:; if (__pyx_t_2) { if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_double, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L16_break; } break; case 'c': break; case 'O': break; default: break; } } } __pyx_t_3 = ((__pyx_v_itemsize == -1L) != 0); if (!__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L32_bool_binop_done; } __pyx_t_3 = ((__pyx_v_itemsize == (sizeof(float))) != 0); __pyx_t_2 = __pyx_t_3; __pyx_L32_bool_binop_done:; if (__pyx_t_2) { 
__pyx_v_memslice = __Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_float(__pyx_v_arg); __pyx_t_2 = (__pyx_v_memslice.memview != 0); if (__pyx_t_2) { __PYX_XDEC_MEMVIEW((&__pyx_v_memslice), 1); if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_float, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L16_break; } /*else*/ { PyErr_Clear(); } } __pyx_t_3 = ((__pyx_v_itemsize == -1L) != 0); if (!__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L36_bool_binop_done; } __pyx_t_3 = ((__pyx_v_itemsize == (sizeof(double))) != 0); __pyx_t_2 = __pyx_t_3; __pyx_L36_bool_binop_done:; if (__pyx_t_2) { __pyx_v_memslice = __Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_double(__pyx_v_arg); __pyx_t_2 = (__pyx_v_memslice.memview != 0); if (__pyx_t_2) { __PYX_XDEC_MEMVIEW((&__pyx_v_memslice), 1); if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_double, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L16_break; } /*else*/ { PyErr_Clear(); } } __pyx_t_3 = ((__pyx_v_itemsize == -1L) != 0); if (!__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L40_bool_binop_done; } __pyx_t_3 = ((__pyx_v_itemsize == (sizeof(unsigned char))) != 0); __pyx_t_2 = __pyx_t_3; __pyx_L40_bool_binop_done:; if (__pyx_t_2) { __pyx_v_memslice = __Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_unsigned_char(__pyx_v_arg); __pyx_t_2 = (__pyx_v_memslice.memview != 0); if (__pyx_t_2) { __PYX_XDEC_MEMVIEW((&__pyx_v_memslice), 1); if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_kp_s_unsigned_char, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L16_break; } /*else*/ { PyErr_Clear(); } } if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, Py_None, long, 1, __Pyx_PyInt_From_long, 1, 0, 
0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L16_break; } __pyx_L16_break:; __pyx_t_8 = PyList_New(0); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __pyx_v_candidates = ((PyObject*)__pyx_t_8); __pyx_t_8 = 0; __pyx_t_10 = 0; if (unlikely(__pyx_v_signatures == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_9 = __Pyx_dict_iterator(((PyObject*)__pyx_v_signatures), 1, ((PyObject *)NULL), (&__pyx_t_12), (&__pyx_t_7)); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = __pyx_t_9; __pyx_t_9 = 0; while (1) { __pyx_t_13 = __Pyx_dict_iter_next(__pyx_t_8, __pyx_t_12, &__pyx_t_10, &__pyx_t_9, NULL, NULL, __pyx_t_7); if (unlikely(__pyx_t_13 == 0)) break; if (unlikely(__pyx_t_13 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __Pyx_XDECREF_SET(__pyx_v_sig, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_match_found = 0; __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_sig, __pyx_n_s_strip); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_split); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_1); __Pyx_INCREF(__pyx_v_dest_sig); __Pyx_GIVEREF(__pyx_v_dest_sig); PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_v_dest_sig); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_zip, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (likely(PyList_CheckExact(__pyx_t_1)) || PyTuple_CheckExact(__pyx_t_1)) { __pyx_t_9 = __pyx_t_1; __Pyx_INCREF(__pyx_t_9); __pyx_t_14 = 0; __pyx_t_15 = NULL; } else { __pyx_t_14 = -1; __pyx_t_9 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_15 = Py_TYPE(__pyx_t_9)->tp_iternext; if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; for (;;) { if (likely(!__pyx_t_15)) { if (likely(PyList_CheckExact(__pyx_t_9))) { if (__pyx_t_14 >= PyList_GET_SIZE(__pyx_t_9)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_1 = PyList_GET_ITEM(__pyx_t_9, __pyx_t_14); __Pyx_INCREF(__pyx_t_1); __pyx_t_14++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_1 = PySequence_ITEM(__pyx_t_9, __pyx_t_14); __pyx_t_14++; if 
(unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); #endif } else { if (__pyx_t_14 >= PyTuple_GET_SIZE(__pyx_t_9)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_9, __pyx_t_14); __Pyx_INCREF(__pyx_t_1); __pyx_t_14++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_1 = PySequence_ITEM(__pyx_t_9, __pyx_t_14); __pyx_t_14++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); #endif } } else { __pyx_t_1 = __pyx_t_15(__pyx_t_9); if (unlikely(!__pyx_t_1)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } __Pyx_GOTREF(__pyx_t_1); } if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) { PyObject* sequence = __pyx_t_1; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_16 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_17 = PyTuple_GET_ITEM(sequence, 1); } else { __pyx_t_16 = PyList_GET_ITEM(sequence, 0); __pyx_t_17 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_16); __Pyx_INCREF(__pyx_t_17); #else __pyx_t_16 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno 
= __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __pyx_t_17 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_17); #endif __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { Py_ssize_t index = -1; __pyx_t_18 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_18)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_18); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_19 = Py_TYPE(__pyx_t_18)->tp_iternext; index = 0; __pyx_t_16 = __pyx_t_19(__pyx_t_18); if (unlikely(!__pyx_t_16)) goto __pyx_L47_unpacking_failed; __Pyx_GOTREF(__pyx_t_16); index = 1; __pyx_t_17 = __pyx_t_19(__pyx_t_18); if (unlikely(!__pyx_t_17)) goto __pyx_L47_unpacking_failed; __Pyx_GOTREF(__pyx_t_17); if (__Pyx_IternextUnpackEndCheck(__pyx_t_19(__pyx_t_18), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_19 = NULL; __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; goto __pyx_L48_unpacking_done; __pyx_L47_unpacking_failed:; __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; __pyx_t_19 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L48_unpacking_done:; } __Pyx_XDECREF_SET(__pyx_v_src_type, __pyx_t_16); __pyx_t_16 = 0; __Pyx_XDECREF_SET(__pyx_v_dst_type, __pyx_t_17); __pyx_t_17 = 0; __pyx_t_2 = (__pyx_v_dst_type != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { __pyx_t_1 = PyObject_RichCompare(__pyx_v_src_type, __pyx_v_dst_type, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_3) { __pyx_v_match_found = 1; goto __pyx_L50; } /*else*/ { __pyx_v_match_found = 0; goto __pyx_L46_break; } __pyx_L50:; } } __pyx_L46_break:; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_3 = (__pyx_v_match_found != 0); if (__pyx_t_3) { __pyx_t_20 = __Pyx_PyList_Append(__pyx_v_candidates, __pyx_v_sig); if (unlikely(__pyx_t_20 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_3 = (__pyx_v_candidates != Py_None) && (PyList_GET_SIZE(__pyx_v_candidates) != 0); __pyx_t_2 = ((!__pyx_t_3) != 0); if (__pyx_t_2) { __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_12 = PyList_GET_SIZE(__pyx_v_candidates); if (unlikely(__pyx_t_12 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_2 = ((__pyx_t_12 > 1) != 0); if (__pyx_t_2) { __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /*else*/ { __Pyx_XDECREF(__pyx_r); if (unlikely(__pyx_v_signatures == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} } __pyx_t_8 = __Pyx_PyDict_GetItem(((PyObject*)__pyx_v_signatures), PyList_GET_ITEM(__pyx_v_candidates, 0)); if (unlikely(__pyx_t_8 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __Pyx_GOTREF(__pyx_t_8); __pyx_r = __pyx_t_8; __pyx_t_8 = 0; goto __pyx_L0; } /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_16); __Pyx_XDECREF(__pyx_t_17); __Pyx_XDECREF(__pyx_t_18); __Pyx_AddTraceback("fuel.transformers._image.__pyx_fused_cpdef", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_dest_sig); __Pyx_XDECREF(__pyx_v_ndarray); __Pyx_XDECREF(__pyx_v_numpy); __Pyx_XDECREF(__pyx_v_arg); __Pyx_XDECREF(__pyx_v_dtype); __Pyx_XDECREF(__pyx_v_arg_base); __Pyx_XDECREF(__pyx_v_candidates); __Pyx_XDECREF(__pyx_v_sig); __Pyx_XDECREF(__pyx_v_src_type); __Pyx_XDECREF(__pyx_v_dst_type); __Pyx_XDECREF(__pyx_v_kwargs); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pw_4fuel_12transformers_6_image_3__pyx_fuse_0window_batch_bchw(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pw_4fuel_12transformers_6_image_1window_batch_bchw(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_fuse_0__pyx_f_4fuel_12transformers_6_image_window_batch_bchw(__Pyx_memviewslice __pyx_v_batch, __Pyx_memviewslice __pyx_v_height_offsets, __Pyx_memviewslice __pyx_v_width_offsets, __Pyx_memviewslice __pyx_v_out, CYTHON_UNUSED int __pyx_skip_dispatch) { __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_index; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_window_width; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_window_height; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_h_off; 
__pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_w_off; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_h_extent; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_w_extent; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_7; __Pyx_memviewslice __pyx_t_8 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_fuse_0window_batch_bchw", 0); /* "fuel/transformers/_image.pyx":51 * """ * cdef Py_intptr_t index * cdef Py_intptr_t window_width = out.shape[3] # <<<<<<<<<<<<<< * cdef Py_intptr_t window_height = out.shape[2] * cdef Py_intptr_t h_off, w_off, h_extent, w_extent */ __pyx_v_window_width = (__pyx_v_out.shape[3]); /* "fuel/transformers/_image.pyx":52 * cdef Py_intptr_t index * cdef Py_intptr_t window_width = out.shape[3] * cdef Py_intptr_t window_height = out.shape[2] # <<<<<<<<<<<<<< * cdef Py_intptr_t h_off, w_off, h_extent, w_extent * with nogil: */ __pyx_v_window_height = (__pyx_v_out.shape[2]); /* "fuel/transformers/_image.pyx":54 * cdef Py_intptr_t window_height = out.shape[2] * cdef Py_intptr_t h_off, w_off, h_extent, w_extent * with nogil: # <<<<<<<<<<<<<< * for index in prange(batch.shape[0]): * h_off = height_offsets[index] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fuel/transformers/_image.pyx":55 * cdef Py_intptr_t h_off, w_off, h_extent, w_extent * with nogil: * for index in prange(batch.shape[0]): # <<<<<<<<<<<<<< * h_off = height_offsets[index] * w_off = width_offsets[index] */ if (unlikely(!__pyx_v_batch.memview)) { __Pyx_RaiseUnboundMemoryviewSliceNogil("batch"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L4_error;} } __pyx_t_1 = (__pyx_v_batch.shape[0]); if (1 == 
0) abort(); { __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_parallel_temp0 = 0xbad0bad0; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_parallel_temp1 = 0xbad0bad0; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_parallel_temp2 = 0xbad0bad0; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_parallel_temp3 = 0xbad0bad0; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_parallel_temp4 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_3 = (__pyx_t_1 - 0) / 1; if (__pyx_t_3 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_4, __pyx_t_7, __pyx_t_5) firstprivate(__pyx_t_6, __pyx_t_8) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_index) lastprivate(__pyx_v_index) lastprivate(__pyx_v_w_off) lastprivate(__pyx_v_h_extent) lastprivate(__pyx_v_w_extent) lastprivate(__pyx_v_h_off) #endif /* _OPENMP */ for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ if (__pyx_parallel_why < 2) { __pyx_v_index = 0 + 1 * __pyx_t_2; /* Initialize private variables to invalid values */ __pyx_v_w_off = ((__pyx_t_4fuel_12transformers_6_image_Py_intptr_t)0xbad0bad0); __pyx_v_h_extent = ((__pyx_t_4fuel_12transformers_6_image_Py_intptr_t)0xbad0bad0); __pyx_v_w_extent = 
((__pyx_t_4fuel_12transformers_6_image_Py_intptr_t)0xbad0bad0); __pyx_v_h_off = ((__pyx_t_4fuel_12transformers_6_image_Py_intptr_t)0xbad0bad0); /* "fuel/transformers/_image.pyx":56 * with nogil: * for index in prange(batch.shape[0]): * h_off = height_offsets[index] # <<<<<<<<<<<<<< * w_off = width_offsets[index] * h_extent = h_off + window_height */ __pyx_t_4 = __pyx_v_index; __pyx_v_h_off = (*((long *) ( /* dim=0 */ (__pyx_v_height_offsets.data + __pyx_t_4 * __pyx_v_height_offsets.strides[0]) ))); /* "fuel/transformers/_image.pyx":57 * for index in prange(batch.shape[0]): * h_off = height_offsets[index] * w_off = width_offsets[index] # <<<<<<<<<<<<<< * h_extent = h_off + window_height * w_extent = w_off + window_width */ __pyx_t_5 = __pyx_v_index; __pyx_v_w_off = (*((long *) ( /* dim=0 */ (__pyx_v_width_offsets.data + __pyx_t_5 * __pyx_v_width_offsets.strides[0]) ))); /* "fuel/transformers/_image.pyx":58 * h_off = height_offsets[index] * w_off = width_offsets[index] * h_extent = h_off + window_height # <<<<<<<<<<<<<< * w_extent = w_off + window_width * out[index] = batch[index, :, h_off:h_extent, w_off:w_extent] */ __pyx_v_h_extent = (__pyx_v_h_off + __pyx_v_window_height); /* "fuel/transformers/_image.pyx":59 * w_off = width_offsets[index] * h_extent = h_off + window_height * w_extent = w_off + window_width # <<<<<<<<<<<<<< * out[index] = batch[index, :, h_off:h_extent, w_off:w_extent] */ __pyx_v_w_extent = (__pyx_v_w_off + __pyx_v_window_width); /* "fuel/transformers/_image.pyx":60 * h_extent = h_off + window_height * w_extent = w_off + window_width * out[index] = batch[index, :, h_off:h_extent, w_off:w_extent] # <<<<<<<<<<<<<< */ __pyx_t_6.data = __pyx_v_batch.data; __pyx_t_6.memview = __pyx_v_batch.memview; __PYX_INC_MEMVIEW(&__pyx_t_6, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_index; Py_ssize_t __pyx_tmp_shape = __pyx_v_batch.shape[0]; Py_ssize_t __pyx_tmp_stride = __pyx_v_batch.strides[0]; if (0 && (__pyx_tmp_idx < 0)) __pyx_tmp_idx += __pyx_tmp_shape; if (0 
&& (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_t_6.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_6.shape[0] = __pyx_v_batch.shape[1]; __pyx_t_6.strides[0] = __pyx_v_batch.strides[1]; __pyx_t_6.suboffsets[0] = -1; __pyx_t_7 = -1; if (unlikely(__pyx_memoryview_slice_memviewslice( &__pyx_t_6, __pyx_v_batch.shape[2], __pyx_v_batch.strides[2], __pyx_v_batch.suboffsets[2], 2, 1, &__pyx_t_7, __pyx_v_h_off, __pyx_v_h_extent, 0, 1, 1, 0, 1) < 0)) { {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } if (unlikely(__pyx_memoryview_slice_memviewslice( &__pyx_t_6, __pyx_v_batch.shape[3], __pyx_v_batch.strides[3], __pyx_v_batch.suboffsets[3], 3, 2, &__pyx_t_7, __pyx_v_w_off, __pyx_v_w_extent, 0, 1, 1, 0, 1) < 0)) { {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_t_8.data = __pyx_v_out.data; __pyx_t_8.memview = __pyx_v_out.memview; __PYX_INC_MEMVIEW(&__pyx_t_8, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_index; Py_ssize_t __pyx_tmp_shape = __pyx_v_out.shape[0]; Py_ssize_t __pyx_tmp_stride = __pyx_v_out.strides[0]; if (0 && (__pyx_tmp_idx < 0)) __pyx_tmp_idx += __pyx_tmp_shape; if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_t_8.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_8.shape[0] = 
__pyx_v_out.shape[1]; __pyx_t_8.strides[0] = __pyx_v_out.strides[1]; __pyx_t_8.suboffsets[0] = -1; __pyx_t_8.shape[1] = __pyx_v_out.shape[2]; __pyx_t_8.strides[1] = __pyx_v_out.strides[2]; __pyx_t_8.suboffsets[1] = -1; __pyx_t_8.shape[2] = __pyx_v_out.shape[3]; __pyx_t_8.strides[2] = __pyx_v_out.strides[3]; __pyx_t_8.suboffsets[2] = -1; if (unlikely(__pyx_memoryview_copy_contents(__pyx_t_6, __pyx_t_8, 3, 3, 0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L8_error;} __PYX_XDEC_MEMVIEW(&__pyx_t_8, 0); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 0); goto __pyx_L11; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L10; __pyx_L10:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates0) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_index; __pyx_parallel_temp1 = __pyx_v_w_off; __pyx_parallel_temp2 = __pyx_v_h_extent; __pyx_parallel_temp3 = __pyx_v_w_extent; __pyx_parallel_temp4 = __pyx_v_h_off; } __pyx_L11:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ __PYX_XDEC_MEMVIEW(&__pyx_t_6, 0); __PYX_XDEC_MEMVIEW(&__pyx_t_8, 0); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by 
a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_index = __pyx_parallel_temp0; __pyx_v_w_off = __pyx_parallel_temp1; __pyx_v_h_extent = __pyx_parallel_temp2; __pyx_v_w_extent = __pyx_parallel_temp3; __pyx_v_h_off = __pyx_parallel_temp4; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fuel/transformers/_image.pyx":54 * cdef Py_intptr_t window_height = out.shape[2] * cdef Py_intptr_t h_off, w_off, h_extent, w_extent * with nogil: # <<<<<<<<<<<<<< * for index in prange(batch.shape[0]): * h_off = height_offsets[index] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fuel/transformers/_image.pyx":15 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef window_batch_bchw(image_dtype[:, :, :, :] batch, # <<<<<<<<<<<<<< * long[:] height_offsets, long[:] width_offsets, * image_dtype[:, :, :, :] out): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); 
__PYX_XDEC_MEMVIEW(&__pyx_t_8, 1); __Pyx_AddTraceback("fuel.transformers._image.window_batch_bchw", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4fuel_12transformers_6_image_3__pyx_fuse_0window_batch_bchw(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_fuse_0__pyx_mdef_4fuel_12transformers_6_image_3__pyx_fuse_0window_batch_bchw = {"__pyx_fuse_0window_batch_bchw", (PyCFunction)__pyx_pw_4fuel_12transformers_6_image_3__pyx_fuse_0window_batch_bchw, METH_VARARGS|METH_KEYWORDS, __pyx_doc_4fuel_12transformers_6_image_window_batch_bchw}; static PyObject *__pyx_pw_4fuel_12transformers_6_image_3__pyx_fuse_0window_batch_bchw(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_batch = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_height_offsets = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_width_offsets = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_out = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_fuse_0window_batch_bchw (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_batch,&__pyx_n_s_height_offsets,&__pyx_n_s_width_offsets,&__pyx_n_s_out,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if 
(likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_batch)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_height_offsets)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_fuse_0window_batch_bchw", 1, 4, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_width_offsets)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_fuse_0window_batch_bchw", 1, 4, 4, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_out)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_fuse_0window_batch_bchw", 1, 4, 4, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_fuse_0window_batch_bchw") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_batch = __Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_float(values[0]); if (unlikely(!__pyx_v_batch.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_height_offsets = __Pyx_PyObject_to_MemoryviewSlice_ds_long(values[1]); if (unlikely(!__pyx_v_height_offsets.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_width_offsets = __Pyx_PyObject_to_MemoryviewSlice_ds_long(values[2]); 
if (unlikely(!__pyx_v_width_offsets.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_out = __Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_float(values[3]); if (unlikely(!__pyx_v_out.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_fuse_0window_batch_bchw", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fuel.transformers._image.__pyx_fuse_0window_batch_bchw", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4fuel_12transformers_6_image_2__pyx_fuse_0window_batch_bchw(__pyx_self, __pyx_v_batch, __pyx_v_height_offsets, __pyx_v_width_offsets, __pyx_v_out); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4fuel_12transformers_6_image_2__pyx_fuse_0window_batch_bchw(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_batch, __Pyx_memviewslice __pyx_v_height_offsets, __Pyx_memviewslice __pyx_v_width_offsets, __Pyx_memviewslice __pyx_v_out) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_fuse_0window_batch_bchw", 0); __Pyx_XDECREF(__pyx_r); if (unlikely(!__pyx_v_batch.memview)) { __Pyx_RaiseUnboundLocalError("batch"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } if (unlikely(!__pyx_v_height_offsets.memview)) { __Pyx_RaiseUnboundLocalError("height_offsets"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } if 
(unlikely(!__pyx_v_width_offsets.memview)) { __Pyx_RaiseUnboundLocalError("width_offsets"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } if (unlikely(!__pyx_v_out.memview)) { __Pyx_RaiseUnboundLocalError("out"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = __pyx_fuse_0__pyx_f_4fuel_12transformers_6_image_window_batch_bchw(__pyx_v_batch, __pyx_v_height_offsets, __pyx_v_width_offsets, __pyx_v_out, 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("fuel.transformers._image.__pyx_fuse_0window_batch_bchw", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_batch, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_height_offsets, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_width_offsets, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_out, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pw_4fuel_12transformers_6_image_5__pyx_fuse_1window_batch_bchw(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pw_4fuel_12transformers_6_image_1window_batch_bchw(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_fuse_1__pyx_f_4fuel_12transformers_6_image_window_batch_bchw(__Pyx_memviewslice __pyx_v_batch, __Pyx_memviewslice __pyx_v_height_offsets, __Pyx_memviewslice __pyx_v_width_offsets, __Pyx_memviewslice __pyx_v_out, CYTHON_UNUSED int __pyx_skip_dispatch) { __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_index; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_window_width; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_window_height; 
__pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_h_off; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_w_off; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_h_extent; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_w_extent; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_7; __Pyx_memviewslice __pyx_t_8 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_fuse_1window_batch_bchw", 0); /* "fuel/transformers/_image.pyx":51 * """ * cdef Py_intptr_t index * cdef Py_intptr_t window_width = out.shape[3] # <<<<<<<<<<<<<< * cdef Py_intptr_t window_height = out.shape[2] * cdef Py_intptr_t h_off, w_off, h_extent, w_extent */ __pyx_v_window_width = (__pyx_v_out.shape[3]); /* "fuel/transformers/_image.pyx":52 * cdef Py_intptr_t index * cdef Py_intptr_t window_width = out.shape[3] * cdef Py_intptr_t window_height = out.shape[2] # <<<<<<<<<<<<<< * cdef Py_intptr_t h_off, w_off, h_extent, w_extent * with nogil: */ __pyx_v_window_height = (__pyx_v_out.shape[2]); /* "fuel/transformers/_image.pyx":54 * cdef Py_intptr_t window_height = out.shape[2] * cdef Py_intptr_t h_off, w_off, h_extent, w_extent * with nogil: # <<<<<<<<<<<<<< * for index in prange(batch.shape[0]): * h_off = height_offsets[index] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fuel/transformers/_image.pyx":55 * cdef Py_intptr_t h_off, w_off, h_extent, w_extent * with nogil: * for index in prange(batch.shape[0]): # <<<<<<<<<<<<<< * h_off = height_offsets[index] * w_off = width_offsets[index] */ if (unlikely(!__pyx_v_batch.memview)) { __Pyx_RaiseUnboundMemoryviewSliceNogil("batch"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto 
__pyx_L4_error;} } __pyx_t_1 = (__pyx_v_batch.shape[0]); if (1 == 0) abort(); { __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_parallel_temp0 = 0xbad0bad0; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_parallel_temp1 = 0xbad0bad0; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_parallel_temp2 = 0xbad0bad0; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_parallel_temp3 = 0xbad0bad0; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_parallel_temp4 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_3 = (__pyx_t_1 - 0) / 1; if (__pyx_t_3 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_4, __pyx_t_7, __pyx_t_5) firstprivate(__pyx_t_6, __pyx_t_8) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_w_off) lastprivate(__pyx_v_h_off) firstprivate(__pyx_v_index) lastprivate(__pyx_v_index) lastprivate(__pyx_v_h_extent) lastprivate(__pyx_v_w_extent) #endif /* _OPENMP */ for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ if (__pyx_parallel_why < 2) { __pyx_v_index = 0 + 1 * __pyx_t_2; /* Initialize private variables to invalid values */ __pyx_v_w_off = ((__pyx_t_4fuel_12transformers_6_image_Py_intptr_t)0xbad0bad0); __pyx_v_h_off = 
((__pyx_t_4fuel_12transformers_6_image_Py_intptr_t)0xbad0bad0); __pyx_v_h_extent = ((__pyx_t_4fuel_12transformers_6_image_Py_intptr_t)0xbad0bad0); __pyx_v_w_extent = ((__pyx_t_4fuel_12transformers_6_image_Py_intptr_t)0xbad0bad0); /* "fuel/transformers/_image.pyx":56 * with nogil: * for index in prange(batch.shape[0]): * h_off = height_offsets[index] # <<<<<<<<<<<<<< * w_off = width_offsets[index] * h_extent = h_off + window_height */ __pyx_t_4 = __pyx_v_index; __pyx_v_h_off = (*((long *) ( /* dim=0 */ (__pyx_v_height_offsets.data + __pyx_t_4 * __pyx_v_height_offsets.strides[0]) ))); /* "fuel/transformers/_image.pyx":57 * for index in prange(batch.shape[0]): * h_off = height_offsets[index] * w_off = width_offsets[index] # <<<<<<<<<<<<<< * h_extent = h_off + window_height * w_extent = w_off + window_width */ __pyx_t_5 = __pyx_v_index; __pyx_v_w_off = (*((long *) ( /* dim=0 */ (__pyx_v_width_offsets.data + __pyx_t_5 * __pyx_v_width_offsets.strides[0]) ))); /* "fuel/transformers/_image.pyx":58 * h_off = height_offsets[index] * w_off = width_offsets[index] * h_extent = h_off + window_height # <<<<<<<<<<<<<< * w_extent = w_off + window_width * out[index] = batch[index, :, h_off:h_extent, w_off:w_extent] */ __pyx_v_h_extent = (__pyx_v_h_off + __pyx_v_window_height); /* "fuel/transformers/_image.pyx":59 * w_off = width_offsets[index] * h_extent = h_off + window_height * w_extent = w_off + window_width # <<<<<<<<<<<<<< * out[index] = batch[index, :, h_off:h_extent, w_off:w_extent] */ __pyx_v_w_extent = (__pyx_v_w_off + __pyx_v_window_width); /* "fuel/transformers/_image.pyx":60 * h_extent = h_off + window_height * w_extent = w_off + window_width * out[index] = batch[index, :, h_off:h_extent, w_off:w_extent] # <<<<<<<<<<<<<< */ __pyx_t_6.data = __pyx_v_batch.data; __pyx_t_6.memview = __pyx_v_batch.memview; __PYX_INC_MEMVIEW(&__pyx_t_6, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_index; Py_ssize_t __pyx_tmp_shape = __pyx_v_batch.shape[0]; Py_ssize_t __pyx_tmp_stride = 
__pyx_v_batch.strides[0]; if (0 && (__pyx_tmp_idx < 0)) __pyx_tmp_idx += __pyx_tmp_shape; if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_t_6.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_6.shape[0] = __pyx_v_batch.shape[1]; __pyx_t_6.strides[0] = __pyx_v_batch.strides[1]; __pyx_t_6.suboffsets[0] = -1; __pyx_t_7 = -1; if (unlikely(__pyx_memoryview_slice_memviewslice( &__pyx_t_6, __pyx_v_batch.shape[2], __pyx_v_batch.strides[2], __pyx_v_batch.suboffsets[2], 2, 1, &__pyx_t_7, __pyx_v_h_off, __pyx_v_h_extent, 0, 1, 1, 0, 1) < 0)) { {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } if (unlikely(__pyx_memoryview_slice_memviewslice( &__pyx_t_6, __pyx_v_batch.shape[3], __pyx_v_batch.strides[3], __pyx_v_batch.suboffsets[3], 3, 2, &__pyx_t_7, __pyx_v_w_off, __pyx_v_w_extent, 0, 1, 1, 0, 1) < 0)) { {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_t_8.data = __pyx_v_out.data; __pyx_t_8.memview = __pyx_v_out.memview; __PYX_INC_MEMVIEW(&__pyx_t_8, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_index; Py_ssize_t __pyx_tmp_shape = __pyx_v_out.shape[0]; Py_ssize_t __pyx_tmp_stride = __pyx_v_out.strides[0]; if (0 && (__pyx_tmp_idx < 0)) __pyx_tmp_idx += __pyx_tmp_shape; if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto 
__pyx_L8_error;} } __pyx_t_8.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_8.shape[0] = __pyx_v_out.shape[1]; __pyx_t_8.strides[0] = __pyx_v_out.strides[1]; __pyx_t_8.suboffsets[0] = -1; __pyx_t_8.shape[1] = __pyx_v_out.shape[2]; __pyx_t_8.strides[1] = __pyx_v_out.strides[2]; __pyx_t_8.suboffsets[1] = -1; __pyx_t_8.shape[2] = __pyx_v_out.shape[3]; __pyx_t_8.strides[2] = __pyx_v_out.strides[3]; __pyx_t_8.suboffsets[2] = -1; if (unlikely(__pyx_memoryview_copy_contents(__pyx_t_6, __pyx_t_8, 3, 3, 0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L8_error;} __PYX_XDEC_MEMVIEW(&__pyx_t_8, 0); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 0); goto __pyx_L11; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L10; __pyx_L10:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates1) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_w_off; __pyx_parallel_temp1 = __pyx_v_h_off; __pyx_parallel_temp2 = __pyx_v_index; __pyx_parallel_temp3 = __pyx_v_h_extent; __pyx_parallel_temp4 = __pyx_v_w_extent; } __pyx_L11:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ __PYX_XDEC_MEMVIEW(&__pyx_t_6, 0); __PYX_XDEC_MEMVIEW(&__pyx_t_8, 0); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP 
} #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_w_off = __pyx_parallel_temp0; __pyx_v_h_off = __pyx_parallel_temp1; __pyx_v_index = __pyx_parallel_temp2; __pyx_v_h_extent = __pyx_parallel_temp3; __pyx_v_w_extent = __pyx_parallel_temp4; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fuel/transformers/_image.pyx":54 * cdef Py_intptr_t window_height = out.shape[2] * cdef Py_intptr_t h_off, w_off, h_extent, w_extent * with nogil: # <<<<<<<<<<<<<< * for index in prange(batch.shape[0]): * h_off = height_offsets[index] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fuel/transformers/_image.pyx":15 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef window_batch_bchw(image_dtype[:, :, :, :] batch, # <<<<<<<<<<<<<< * long[:] height_offsets, long[:] width_offsets, * image_dtype[:, :, :, :] out): */ /* function exit code */ __pyx_r = Py_None; 
__Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_8, 1); __Pyx_AddTraceback("fuel.transformers._image.window_batch_bchw", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4fuel_12transformers_6_image_5__pyx_fuse_1window_batch_bchw(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_fuse_1__pyx_mdef_4fuel_12transformers_6_image_5__pyx_fuse_1window_batch_bchw = {"__pyx_fuse_1window_batch_bchw", (PyCFunction)__pyx_pw_4fuel_12transformers_6_image_5__pyx_fuse_1window_batch_bchw, METH_VARARGS|METH_KEYWORDS, __pyx_doc_4fuel_12transformers_6_image_window_batch_bchw}; static PyObject *__pyx_pw_4fuel_12transformers_6_image_5__pyx_fuse_1window_batch_bchw(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_batch = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_height_offsets = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_width_offsets = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_out = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_fuse_1window_batch_bchw (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_batch,&__pyx_n_s_height_offsets,&__pyx_n_s_width_offsets,&__pyx_n_s_out,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto 
__pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_batch)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_height_offsets)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_fuse_1window_batch_bchw", 1, 4, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_width_offsets)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_fuse_1window_batch_bchw", 1, 4, 4, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_out)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_fuse_1window_batch_bchw", 1, 4, 4, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_fuse_1window_batch_bchw") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_batch = __Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_double(values[0]); if (unlikely(!__pyx_v_batch.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_height_offsets = __Pyx_PyObject_to_MemoryviewSlice_ds_long(values[1]); if (unlikely(!__pyx_v_height_offsets.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto 
__pyx_L3_error;} __pyx_v_width_offsets = __Pyx_PyObject_to_MemoryviewSlice_ds_long(values[2]); if (unlikely(!__pyx_v_width_offsets.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_out = __Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_double(values[3]); if (unlikely(!__pyx_v_out.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_fuse_1window_batch_bchw", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fuel.transformers._image.__pyx_fuse_1window_batch_bchw", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4fuel_12transformers_6_image_4__pyx_fuse_1window_batch_bchw(__pyx_self, __pyx_v_batch, __pyx_v_height_offsets, __pyx_v_width_offsets, __pyx_v_out); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4fuel_12transformers_6_image_4__pyx_fuse_1window_batch_bchw(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_batch, __Pyx_memviewslice __pyx_v_height_offsets, __Pyx_memviewslice __pyx_v_width_offsets, __Pyx_memviewslice __pyx_v_out) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_fuse_1window_batch_bchw", 0); __Pyx_XDECREF(__pyx_r); if (unlikely(!__pyx_v_batch.memview)) { __Pyx_RaiseUnboundLocalError("batch"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } if (unlikely(!__pyx_v_height_offsets.memview)) { __Pyx_RaiseUnboundLocalError("height_offsets"); {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } if (unlikely(!__pyx_v_width_offsets.memview)) { __Pyx_RaiseUnboundLocalError("width_offsets"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } if (unlikely(!__pyx_v_out.memview)) { __Pyx_RaiseUnboundLocalError("out"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = __pyx_fuse_1__pyx_f_4fuel_12transformers_6_image_window_batch_bchw(__pyx_v_batch, __pyx_v_height_offsets, __pyx_v_width_offsets, __pyx_v_out, 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("fuel.transformers._image.__pyx_fuse_1window_batch_bchw", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_batch, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_height_offsets, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_width_offsets, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_out, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pw_4fuel_12transformers_6_image_7__pyx_fuse_2window_batch_bchw(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pw_4fuel_12transformers_6_image_1window_batch_bchw(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_fuse_2__pyx_f_4fuel_12transformers_6_image_window_batch_bchw(__Pyx_memviewslice __pyx_v_batch, __Pyx_memviewslice __pyx_v_height_offsets, __Pyx_memviewslice __pyx_v_width_offsets, __Pyx_memviewslice __pyx_v_out, CYTHON_UNUSED int __pyx_skip_dispatch) { __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_index; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_window_width; 
__pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_window_height; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_h_off; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_w_off; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_h_extent; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_v_w_extent; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_7; __Pyx_memviewslice __pyx_t_8 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_fuse_2window_batch_bchw", 0); /* "fuel/transformers/_image.pyx":51 * """ * cdef Py_intptr_t index * cdef Py_intptr_t window_width = out.shape[3] # <<<<<<<<<<<<<< * cdef Py_intptr_t window_height = out.shape[2] * cdef Py_intptr_t h_off, w_off, h_extent, w_extent */ __pyx_v_window_width = (__pyx_v_out.shape[3]); /* "fuel/transformers/_image.pyx":52 * cdef Py_intptr_t index * cdef Py_intptr_t window_width = out.shape[3] * cdef Py_intptr_t window_height = out.shape[2] # <<<<<<<<<<<<<< * cdef Py_intptr_t h_off, w_off, h_extent, w_extent * with nogil: */ __pyx_v_window_height = (__pyx_v_out.shape[2]); /* "fuel/transformers/_image.pyx":54 * cdef Py_intptr_t window_height = out.shape[2] * cdef Py_intptr_t h_off, w_off, h_extent, w_extent * with nogil: # <<<<<<<<<<<<<< * for index in prange(batch.shape[0]): * h_off = height_offsets[index] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fuel/transformers/_image.pyx":55 * cdef Py_intptr_t h_off, w_off, h_extent, w_extent * with nogil: * for index in prange(batch.shape[0]): # <<<<<<<<<<<<<< * h_off = height_offsets[index] * w_off = width_offsets[index] */ if (unlikely(!__pyx_v_batch.memview)) { __Pyx_RaiseUnboundMemoryviewSliceNogil("batch"); 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L4_error;} } __pyx_t_1 = (__pyx_v_batch.shape[0]); if (1 == 0) abort(); { __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_parallel_temp0 = 0xbad0bad0; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_parallel_temp1 = 0xbad0bad0; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_parallel_temp2 = 0xbad0bad0; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_parallel_temp3 = 0xbad0bad0; __pyx_t_4fuel_12transformers_6_image_Py_intptr_t __pyx_parallel_temp4 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_3 = (__pyx_t_1 - 0) / 1; if (__pyx_t_3 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_4, __pyx_t_7, __pyx_t_5) firstprivate(__pyx_t_6, __pyx_t_8) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_h_off) lastprivate(__pyx_v_w_extent) lastprivate(__pyx_v_w_off) lastprivate(__pyx_v_h_extent) firstprivate(__pyx_v_index) lastprivate(__pyx_v_index) #endif /* _OPENMP */ for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ if (__pyx_parallel_why < 2) { __pyx_v_index = 0 + 1 * __pyx_t_2; /* Initialize private variables to invalid values */ __pyx_v_h_off = 
((__pyx_t_4fuel_12transformers_6_image_Py_intptr_t)0xbad0bad0); __pyx_v_w_extent = ((__pyx_t_4fuel_12transformers_6_image_Py_intptr_t)0xbad0bad0); __pyx_v_w_off = ((__pyx_t_4fuel_12transformers_6_image_Py_intptr_t)0xbad0bad0); __pyx_v_h_extent = ((__pyx_t_4fuel_12transformers_6_image_Py_intptr_t)0xbad0bad0); /* "fuel/transformers/_image.pyx":56 * with nogil: * for index in prange(batch.shape[0]): * h_off = height_offsets[index] # <<<<<<<<<<<<<< * w_off = width_offsets[index] * h_extent = h_off + window_height */ __pyx_t_4 = __pyx_v_index; __pyx_v_h_off = (*((long *) ( /* dim=0 */ (__pyx_v_height_offsets.data + __pyx_t_4 * __pyx_v_height_offsets.strides[0]) ))); /* "fuel/transformers/_image.pyx":57 * for index in prange(batch.shape[0]): * h_off = height_offsets[index] * w_off = width_offsets[index] # <<<<<<<<<<<<<< * h_extent = h_off + window_height * w_extent = w_off + window_width */ __pyx_t_5 = __pyx_v_index; __pyx_v_w_off = (*((long *) ( /* dim=0 */ (__pyx_v_width_offsets.data + __pyx_t_5 * __pyx_v_width_offsets.strides[0]) ))); /* "fuel/transformers/_image.pyx":58 * h_off = height_offsets[index] * w_off = width_offsets[index] * h_extent = h_off + window_height # <<<<<<<<<<<<<< * w_extent = w_off + window_width * out[index] = batch[index, :, h_off:h_extent, w_off:w_extent] */ __pyx_v_h_extent = (__pyx_v_h_off + __pyx_v_window_height); /* "fuel/transformers/_image.pyx":59 * w_off = width_offsets[index] * h_extent = h_off + window_height * w_extent = w_off + window_width # <<<<<<<<<<<<<< * out[index] = batch[index, :, h_off:h_extent, w_off:w_extent] */ __pyx_v_w_extent = (__pyx_v_w_off + __pyx_v_window_width); /* "fuel/transformers/_image.pyx":60 * h_extent = h_off + window_height * w_extent = w_off + window_width * out[index] = batch[index, :, h_off:h_extent, w_off:w_extent] # <<<<<<<<<<<<<< */ __pyx_t_6.data = __pyx_v_batch.data; __pyx_t_6.memview = __pyx_v_batch.memview; __PYX_INC_MEMVIEW(&__pyx_t_6, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_index; Py_ssize_t 
__pyx_tmp_shape = __pyx_v_batch.shape[0]; Py_ssize_t __pyx_tmp_stride = __pyx_v_batch.strides[0]; if (0 && (__pyx_tmp_idx < 0)) __pyx_tmp_idx += __pyx_tmp_shape; if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_t_6.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_6.shape[0] = __pyx_v_batch.shape[1]; __pyx_t_6.strides[0] = __pyx_v_batch.strides[1]; __pyx_t_6.suboffsets[0] = -1; __pyx_t_7 = -1; if (unlikely(__pyx_memoryview_slice_memviewslice( &__pyx_t_6, __pyx_v_batch.shape[2], __pyx_v_batch.strides[2], __pyx_v_batch.suboffsets[2], 2, 1, &__pyx_t_7, __pyx_v_h_off, __pyx_v_h_extent, 0, 1, 1, 0, 1) < 0)) { {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } if (unlikely(__pyx_memoryview_slice_memviewslice( &__pyx_t_6, __pyx_v_batch.shape[3], __pyx_v_batch.strides[3], __pyx_v_batch.suboffsets[3], 3, 2, &__pyx_t_7, __pyx_v_w_off, __pyx_v_w_extent, 0, 1, 1, 0, 1) < 0)) { {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_t_8.data = __pyx_v_out.data; __pyx_t_8.memview = __pyx_v_out.memview; __PYX_INC_MEMVIEW(&__pyx_t_8, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_index; Py_ssize_t __pyx_tmp_shape = __pyx_v_out.shape[0]; Py_ssize_t __pyx_tmp_stride = __pyx_v_out.strides[0]; if (0 && (__pyx_tmp_idx < 0)) __pyx_tmp_idx += __pyx_tmp_shape; if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_t_8.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_8.shape[0] = __pyx_v_out.shape[1]; __pyx_t_8.strides[0] = __pyx_v_out.strides[1]; __pyx_t_8.suboffsets[0] = -1; __pyx_t_8.shape[1] = __pyx_v_out.shape[2]; __pyx_t_8.strides[1] = __pyx_v_out.strides[2]; __pyx_t_8.suboffsets[1] = -1; __pyx_t_8.shape[2] = __pyx_v_out.shape[3]; __pyx_t_8.strides[2] = __pyx_v_out.strides[3]; __pyx_t_8.suboffsets[2] = -1; if (unlikely(__pyx_memoryview_copy_contents(__pyx_t_6, __pyx_t_8, 3, 3, 0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L8_error;} __PYX_XDEC_MEMVIEW(&__pyx_t_8, 0); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 0); goto __pyx_L11; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L10; __pyx_L10:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates2) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_h_off; __pyx_parallel_temp1 = __pyx_v_w_extent; __pyx_parallel_temp2 = __pyx_v_w_off; __pyx_parallel_temp3 = __pyx_v_h_extent; __pyx_parallel_temp4 = __pyx_v_index; } __pyx_L11:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ __PYX_XDEC_MEMVIEW(&__pyx_t_6, 0); __PYX_XDEC_MEMVIEW(&__pyx_t_8, 0); #ifdef WITH_THREAD 
PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_h_off = __pyx_parallel_temp0; __pyx_v_w_extent = __pyx_parallel_temp1; __pyx_v_w_off = __pyx_parallel_temp2; __pyx_v_h_extent = __pyx_parallel_temp3; __pyx_v_index = __pyx_parallel_temp4; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fuel/transformers/_image.pyx":54 * cdef Py_intptr_t window_height = out.shape[2] * cdef Py_intptr_t h_off, w_off, h_extent, w_extent * with nogil: # <<<<<<<<<<<<<< * for index in prange(batch.shape[0]): * h_off = height_offsets[index] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fuel/transformers/_image.pyx":15 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef window_batch_bchw(image_dtype[:, :, :, :] batch, # <<<<<<<<<<<<<< * long[:] height_offsets, long[:] width_offsets, * image_dtype[:, :, :, :] 
out): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_8, 1); __Pyx_AddTraceback("fuel.transformers._image.window_batch_bchw", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4fuel_12transformers_6_image_7__pyx_fuse_2window_batch_bchw(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_fuse_2__pyx_mdef_4fuel_12transformers_6_image_7__pyx_fuse_2window_batch_bchw = {"__pyx_fuse_2window_batch_bchw", (PyCFunction)__pyx_pw_4fuel_12transformers_6_image_7__pyx_fuse_2window_batch_bchw, METH_VARARGS|METH_KEYWORDS, __pyx_doc_4fuel_12transformers_6_image_window_batch_bchw}; static PyObject *__pyx_pw_4fuel_12transformers_6_image_7__pyx_fuse_2window_batch_bchw(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_batch = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_height_offsets = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_width_offsets = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_out = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_fuse_2window_batch_bchw (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_batch,&__pyx_n_s_height_offsets,&__pyx_n_s_width_offsets,&__pyx_n_s_out,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = 
PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_batch)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_height_offsets)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_fuse_2window_batch_bchw", 1, 4, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_width_offsets)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_fuse_2window_batch_bchw", 1, 4, 4, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_out)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_fuse_2window_batch_bchw", 1, 4, 4, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_fuse_2window_batch_bchw") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_batch = __Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_unsigned_char(values[0]); if (unlikely(!__pyx_v_batch.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_height_offsets = __Pyx_PyObject_to_MemoryviewSlice_ds_long(values[1]); if (unlikely(!__pyx_v_height_offsets.memview)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_width_offsets = __Pyx_PyObject_to_MemoryviewSlice_ds_long(values[2]); if (unlikely(!__pyx_v_width_offsets.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_out = __Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_unsigned_char(values[3]); if (unlikely(!__pyx_v_out.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_fuse_2window_batch_bchw", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fuel.transformers._image.__pyx_fuse_2window_batch_bchw", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4fuel_12transformers_6_image_6__pyx_fuse_2window_batch_bchw(__pyx_self, __pyx_v_batch, __pyx_v_height_offsets, __pyx_v_width_offsets, __pyx_v_out); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4fuel_12transformers_6_image_6__pyx_fuse_2window_batch_bchw(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_batch, __Pyx_memviewslice __pyx_v_height_offsets, __Pyx_memviewslice __pyx_v_width_offsets, __Pyx_memviewslice __pyx_v_out) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_fuse_2window_batch_bchw", 0); __Pyx_XDECREF(__pyx_r); if (unlikely(!__pyx_v_batch.memview)) { __Pyx_RaiseUnboundLocalError("batch"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } if 
(unlikely(!__pyx_v_height_offsets.memview)) { __Pyx_RaiseUnboundLocalError("height_offsets"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } if (unlikely(!__pyx_v_width_offsets.memview)) { __Pyx_RaiseUnboundLocalError("width_offsets"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } if (unlikely(!__pyx_v_out.memview)) { __Pyx_RaiseUnboundLocalError("out"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = __pyx_fuse_2__pyx_f_4fuel_12transformers_6_image_window_batch_bchw(__pyx_v_batch, __pyx_v_height_offsets, __pyx_v_width_offsets, __pyx_v_out, 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("fuel.transformers._image.__pyx_fuse_2window_batch_bchw", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_batch, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_height_offsets, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_width_offsets, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_out, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":118 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_lineno = 0; const char 
*__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 118; __pyx_clineno = 
__LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { /* "View.MemoryView":119 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 118; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":118 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":125 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":126 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< 
* * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":128 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":129 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":128 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":131 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":132 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":131 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":134 * 
raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":135 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":134 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":136 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 136; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_5 = __pyx_v_format; __Pyx_INCREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* 
"View.MemoryView":137 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ __pyx_t_6 = __Pyx_PyObject_AsString(__pyx_v_self->_format); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_self->format = __pyx_t_6; /* "View.MemoryView":140 * * * self._shape = <Py_ssize_t *> PyMem_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyMem_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":141 * * self._shape = <Py_ssize_t *> PyMem_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":143 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":144 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":143 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":147 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * 
raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) */ __pyx_t_7 = 0; __pyx_t_5 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_5); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_5)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_1); __Pyx_INCREF(__pyx_t_3); __pyx_t_1++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_5, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); #endif __pyx_t_8 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_8 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_dim = __pyx_t_8; __pyx_v_idx = __pyx_t_7; __pyx_t_7 = (__pyx_t_7 + 1); /* "View.MemoryView":148 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (__pyx_t_4) { /* "View.MemoryView":149 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_9); __pyx_t_3 = 0; __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_10, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_Raise(__pyx_t_9, 0, 0, 0); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":148 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":150 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":147 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) */ } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":153 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_4) { /* "View.MemoryView":154 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":155 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":153 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":156 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_4) { /* "View.MemoryView":157 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":158 * elif mode 
== 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":156 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":160 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_5 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L10:; /* "View.MemoryView":162 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, 
__pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":165 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":166 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_5 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":167 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":170 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":171 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":172 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); 
__Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":171 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":174 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":175 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":176 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_1; __pyx_t_8+=1) { __pyx_v_i = __pyx_t_8; /* "View.MemoryView":177 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":178 * for i in range(self.len / itemsize): * p[i] = Py_None * 
Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":174 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":167 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":118 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":181 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; 
__Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "View.MemoryView":182 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":183 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":184 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":183 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":185 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, 
Py_EQ)); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":186 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":185 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":187 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":188 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":187 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in 
memory.") * info.buf = self.data */ } /* "View.MemoryView":189 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":190 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":191 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":192 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":193 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":194 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":195 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; __pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":196 * info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":198 * info.readonly = 0 * * 
if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":199 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":198 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":201 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":203 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":181 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":207 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if 
self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":208 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":209 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":208 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ goto __pyx_L3; } /* "View.MemoryView":210 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":211 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":212 * elif self.free_data: * 
if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":211 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":214 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyMem_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":210 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":215 * self._strides, self.ndim, False) * free(self.data) * PyMem_Free(self._shape) # <<<<<<<<<<<<<< * * property memview: */ PyMem_Free(__pyx_v_self->_shape); /* "View.MemoryView":207 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":219 * property memview: * @cname('get_memview') * def __get__(self): # <<<<<<<<<<<<<< * * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE */ /* Python wrapper */ static PyObject *get_memview(PyObject *__pyx_v_self); /*proto*/ static PyObject *get_memview(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ 
__Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":221 * def __get__(self): * * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":222 * * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":219 * property memview: * @cname('get_memview') * def __get__(self): # <<<<<<<<<<<<<< * * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":225 * * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":226 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 226; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":225 * * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":228 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":229 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); 
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(__pyx_t_2 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":228 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":231 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename 
= NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":232 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":231 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":236 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":240 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if 
(__pyx_t_1) { /* "View.MemoryView":241 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":240 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":243 * result = array(shape, itemsize, format, 
mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":244 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":243 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, 
itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":245 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":247 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":236 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":273 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_lineno = 0; const char 
*__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 273; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 273; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":274 * cdef object name * def __init__(self, name): * 
self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":273 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":275 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":276 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":275 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":290 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a 
given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":292 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":296 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":298 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":299 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":298 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":301 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview') */ __pyx_r = ((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":290 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":319 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int 
__pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto 
__pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L3_error;} if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":320 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); 
__Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":321 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":322 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":323 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 323; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":324 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":325 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer 
*)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":326 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * self.lock = PyThread_allocate_lock() */ Py_INCREF(Py_None); /* "View.MemoryView":324 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":322 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":328 * Py_INCREF(Py_None) * * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock == NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":329 * * self.lock = PyThread_allocate_lock() * if self.lock == NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":330 * self.lock = PyThread_allocate_lock() * if self.lock == NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 330; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":329 * * self.lock = PyThread_allocate_lock() * if self.lock == NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":332 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = self.view.format == b'O' * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":333 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = self.view.format == b'O' # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if 
(unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 333; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 333; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 333; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":332 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = self.view.format == b'O' * else: */ goto __pyx_L8; } /* "View.MemoryView":335 * self.dtype_is_object = self.view.format == b'O' * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L8:; /* "View.MemoryView":337 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":339 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":319 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # 
<<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":341 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":342 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":343 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * * if self.lock != NULL: */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":342 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ } /* "View.MemoryView":345 * __Pyx_ReleaseBuffer(&self.view) * * if self.lock != NULL: # <<<<<<<<<<<<<< * PyThread_free_lock(self.lock) * */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* 
"View.MemoryView":346 * * if self.lock != NULL: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); /* "View.MemoryView":345 * __Pyx_ReleaseBuffer(&self.view) * * if self.lock != NULL: # <<<<<<<<<<<<<< * PyThread_free_lock(self.lock) * */ } /* "View.MemoryView":341 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":348 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":350 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":352 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { 
__pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else {__pyx_filename = __pyx_f[1]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = 
(__pyx_t_1 + 1); /* "View.MemoryView":353 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 353; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 353; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":352 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":355 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":348 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":358 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":359 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":360 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":359 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":362 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 362; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; #if 
CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 362; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 362; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 362; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 362; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":365 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 365; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_2) { /* "View.MemoryView":366 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* 
"View.MemoryView":365 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":368 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 368; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_itemp = __pyx_t_6; /* "View.MemoryView":369 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":358 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":371 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * have_slices, index = _unellipsify(index, self.view.ndim) * */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, 
PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":372 * * def __setitem__(memoryview self, object index, object value): * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_1 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (likely(__pyx_t_1 != Py_None)) { PyObject* sequence = __pyx_t_1; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_3 = 
PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); #else __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_have_slices = __pyx_t_2; __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":374 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_4) { /* "View.MemoryView":375 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_obj = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":376 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_4) { /* "View.MemoryView":377 * obj = self.is_slice(value) * 
if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_1, __pyx_v_obj); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":376 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L4; } /* "View.MemoryView":379 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_3 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 379; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 379; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_3), __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 379; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_L4:; /* 
"View.MemoryView":374 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L3; } /* "View.MemoryView":381 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 381; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_L3:; /* "View.MemoryView":371 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * have_slices, index = _unellipsify(index, self.view.ndim) * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":383 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_slice", 0); 
__Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":384 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":385 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":386 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int((__pyx_v_self->flags | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L4_error;} __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":387 * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 387; __pyx_clineno = __LINE__; goto __pyx_L4_error;} __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":386 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L4_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, 
__pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L4_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":385 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L11_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":388 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 388; __pyx_clineno = __LINE__; goto __pyx_L6_except_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":389 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":385 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * 
try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L11_try_end:; } /* "View.MemoryView":384 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":391 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto __pyx_L0; /* "View.MemoryView":383 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":393 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; 
__Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":397 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":398 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":399 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":397 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ __pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":393 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":401 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; char const 
*__pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":403 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":408 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); /* "View.MemoryView":410 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_1) { /* "View.MemoryView":411 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":412 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_1 = ((__pyx_v_tmp == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":413 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 413; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":412 * if <size_t>self.view.itemsize > sizeof(array): * tmp = 
PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ } /* "View.MemoryView":414 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":410 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":416 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":418 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":419 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":420 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":419 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":422 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 422; __pyx_clineno = __LINE__; goto __pyx_L6_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L8:; /* "View.MemoryView":426 * * * if 
self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":427 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 427; __pyx_clineno = __LINE__; goto __pyx_L6_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":426 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":428 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":431 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } /*exception exit:*/{ __pyx_L6_error:; __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) 
__Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __pyx_t_3 = __pyx_lineno; __pyx_t_4 = __pyx_clineno; __pyx_t_5 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_9); __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11); } __Pyx_XGIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":401 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":433 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* 
"View.MemoryView":434 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 434; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":435 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 435; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":433 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":437 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject 
*__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; int __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":440 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 440; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":443 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":444 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":445 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") 
*/ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":444 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":449 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = 
strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":450 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 450; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}; __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":449 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":451 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":446 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 446; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_12 = PyErr_ExceptionMatches(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_12) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno 
= 446; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_9); /* "View.MemoryView":447 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 447; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 447; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;} } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":444 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":437 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); 
__Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":453 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; Py_ssize_t __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; char *__pyx_t_10; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":456 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 456; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":461 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":462 * * if isinstance(value, tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, 
__pyx_n_s_pack); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":461 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":464 * bytesvalue = struct.pack(self.view.format, *value) * else: * 
bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); 
__pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":466 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_7 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 466; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_9 = __pyx_v_bytesvalue; __pyx_t_11 = PyBytes_AS_STRING(__pyx_t_9); __pyx_t_12 = (__pyx_t_11 + PyBytes_GET_SIZE(__pyx_t_9)); for (__pyx_t_13 = __pyx_t_11; __pyx_t_13 < __pyx_t_12; __pyx_t_13++) { __pyx_t_10 = __pyx_t_13; __pyx_v_c = (__pyx_t_10[0]); /* "View.MemoryView":467 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_7; /* "View.MemoryView":466 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_7 = (__pyx_t_7 + 1); /* "View.MemoryView":467 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "View.MemoryView":453 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return 
__pyx_r; } /* "View.MemoryView":470 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_STRIDES: * info.shape = self.view.shape */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; Py_ssize_t *__pyx_t_2; char *__pyx_t_3; void *__pyx_t_4; int __pyx_t_5; Py_ssize_t __pyx_t_6; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "View.MemoryView":471 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":472 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_2 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_2; /* "View.MemoryView":471 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.shape = 
self.view.shape * else: */ goto __pyx_L3; } /* "View.MemoryView":474 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L3:; /* "View.MemoryView":476 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":477 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_2 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_2; /* "View.MemoryView":476 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L4; } /* "View.MemoryView":479 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L4:; /* "View.MemoryView":481 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":482 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_2 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_2; /* "View.MemoryView":481 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L5; } /* "View.MemoryView":484 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L5:; /* "View.MemoryView":486 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & 
PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":487 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_3 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_3; /* "View.MemoryView":486 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L6; } /* "View.MemoryView":489 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L6:; /* "View.MemoryView":491 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_4 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":492 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_5 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_5; /* "View.MemoryView":493 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = 0 */ __pyx_t_6 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_6; /* "View.MemoryView":494 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = 0 * info.obj = self */ __pyx_t_6 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_6; /* "View.MemoryView":495 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = 0 # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_v_info->readonly = 0; /* "View.MemoryView":496 * info.len = self.view.len * info.readonly = 0 * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); 
__Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":470 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_STRIDES: * info.shape = self.view.shape */ /* function exit code */ __pyx_r = 0; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":503 * property T: * @cname('__pyx_memoryview_transpose') * def __get__(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":504 * @cname('__pyx_memoryview_transpose') * def __get__(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 504; __pyx_clineno = __LINE__; 
goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 504; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":505 * def __get__(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 505; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":506 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * property base: */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":503 * property T: * @cname('__pyx_memoryview_transpose') * def __get__(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":510 * property base: * @cname('__pyx_memoryview__get__base') * def __get__(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = 
__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}

/* NOTE(review): Cython-GENERATED code (View.MemoryView utility section).
 * Do not hand-edit; regenerate from the .pyx source instead. */

/* "View.MemoryView":511
 *     property base:
 *         def __get__(self):
 *             return self.obj
 *
 * Getter for memoryview.base: returns the Python object this memoryview
 * wraps (self->obj), incref'd before return. No failure path, hence no
 * __pyx_L1_error label in this function. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  /* return self.obj -- take a new reference on the stored base object */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->obj);
  __pyx_r = __pyx_v_self->obj;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":515
 *     property shape:
 *         def __get__(self):
 *             return tuple([length for length in self.view.shape[:self.view.ndim]])
 */
/* Python wrapper */
static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Builds a tuple of per-dimension lengths by iterating the Py_buffer's
 * shape array (self->view.shape) over view.ndim entries. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  Py_ssize_t __pyx_v_length;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t *__pyx_t_2;
  Py_ssize_t *__pyx_t_3;
  Py_ssize_t *__pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__",
0); /* "View.MemoryView":516 * @cname('__pyx_memoryview_get_shape') * def __get__(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * property strides: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 516; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 516; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 516; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 516; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":515 * property shape: * @cname('__pyx_memoryview_get_shape') * def __get__(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":520 * property strides: * @cname('__pyx_memoryview_get_strides') * def __get__(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static 
PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":521 * @cname('__pyx_memoryview_get_strides') * def __get__(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":523 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":521 * @cname('__pyx_memoryview_get_strides') * def __get__(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* 
"View.MemoryView":525 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * property suboffsets: */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":520 * property strides: * @cname('__pyx_memoryview_get_strides') * def __get__(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":529 * property suboffsets: * @cname('__pyx_memoryview_get_suboffsets') * def __get__(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ 
static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":530 * @cname('__pyx_memoryview_get_suboffsets') * def __get__(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":531 * def __get__(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__15, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":530 * 
@cname('__pyx_memoryview_get_suboffsets') * def __get__(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":533 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * property ndim: */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 533; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 533; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 533; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 533; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":529 * property suboffsets: * @cname('__pyx_memoryview_get_suboffsets') * def __get__(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":537 * property 
ndim:
 *         @cname('__pyx_memoryview_get_ndim')
 *         def __get__(self):
 *             return self.view.ndim
 *
 * (continuation of the comment opened on the previous line)
 * NOTE(review): Cython-GENERATED code (View.MemoryView utility section).
 * Do not hand-edit; regenerate from the .pyx source instead. */
/* Python wrapper */
static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Getter for memoryview.ndim: converts the C int view.ndim to a Python
 * int. Conversion can fail (allocation), hence the error label. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* return self.view.ndim */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim);
  if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 538; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":542
 *     property itemsize:
 *         @cname('__pyx_memoryview_get_itemsize')
 *         def __get__(self):
 *             return self.view.itemsize
 */
/* Python wrapper */
static PyObject *__pyx_memoryview_get_itemsize(PyObject
*__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":543 * @cname('__pyx_memoryview_get_itemsize') * def __get__(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * property nbytes: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 543; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":542 * property itemsize: * @cname('__pyx_memoryview_get_itemsize') * def __get__(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":547 * property nbytes: * @cname('__pyx_memoryview_get_nbytes') * def __get__(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":548 * @cname('__pyx_memoryview_get_nbytes') * def __get__(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * property size: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 548; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 548; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 548; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":547 * property nbytes: * @cname('__pyx_memoryview_get_nbytes') * def __get__(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 
NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":552 * property size: * @cname('__pyx_memoryview_get_size') * def __get__(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":553 * @cname('__pyx_memoryview_get_size') * def __get__(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":554 * def __get__(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":556 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if 
(unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 556; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":557 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 557; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":559 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":553 * @cname('__pyx_memoryview_get_size') * def __get__(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":561 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":552 * property size: * @cname('__pyx_memoryview_get_size') * def __get__(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":563 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); 
/*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":564 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":565 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":564 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":567 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def __repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":563 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":569 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":570 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":571 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject 
*)__pyx_v_self)); __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_id, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":570 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":569 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":573 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ 
(wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":574 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 574; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 574; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 574; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 574; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 574; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":573 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":577 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":580 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice, 'C', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":581 * cdef __Pyx_memviewslice tmp 
* mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice, 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig(__pyx_v_mslice, 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 581; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":577 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":583 * return slice_is_contig(mslice, 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 
0; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":586 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice, 'F', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":587 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice, 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig(__pyx_v_mslice, 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 587; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":583 * return slice_is_contig(mslice, 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":589 * return slice_is_contig(mslice, 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); 
/* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":591 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":593 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":594 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), __pyx_k_c, __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":599 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; 
/* "View.MemoryView":589 * return slice_is_contig(mslice, 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":601 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":603 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":605 * cdef int flags = 
self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":606 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), __pyx_k_fortran, __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 606; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":611 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 611; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":601 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":615 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject 
*__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":616 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":617 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, 
__Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":618 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":615 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":621 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":622 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":621 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code 
*/ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":624 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":629 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":630 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 630; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":629 * full slices. 
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":632 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":634 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 634; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":635 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":636 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":637 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); 
__Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else {__pyx_filename = __pyx_f[1]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":638 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if 
(__pyx_t_1) { /* "View.MemoryView":639 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":640 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 640; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 640; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__16); __Pyx_GIVEREF(__pyx_slice__16); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__16); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 640; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":641 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":639 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":643 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__17); if 
(unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 643; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L7:; /* "View.MemoryView":644 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":638 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":646 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":647 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_Format(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 647; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 647; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_t_11, NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 647; __pyx_clineno = __LINE__; 
goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_Raise(__pyx_t_7, 0, 0, 0); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 647; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":646 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":649 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":650 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 650; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L6:; /* "View.MemoryView":637 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":652 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 652; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":653 * * nslices = ndim - len(result) * if nslices: # 
<<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":654 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__18); __Pyx_GIVEREF(__pyx_slice__18); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__18); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":653 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":656 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 656; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 656; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 656; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
/* Pack the 2-tuple `(have_slices or nslices, tuple(result))` returned by
 * _unellipsify ("View.MemoryView":656). __pyx_t_3 holds the first element,
 * __pyx_t_4 the second; SET_ITEM steals both references. */
__pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 656; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_4);
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_r = ((PyObject*)__pyx_t_7);
__pyx_t_7 = 0;
goto __pyx_L0;
/* "View.MemoryView":624: cdef tuple _unellipsify(object index, int ndim): */
/* function exit code: on error, drop temporaries, record traceback, return 0 (NULL). */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_tup);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_XDECREF(__pyx_v_item);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}

/* "View.MemoryView":658: cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
 *
 * Cython-generated. Scans the first `ndim` entries of `suboffsets` and raises
 * ValueError("Indirect dimensions not supported") if any entry is >= 0, i.e.
 * if any dimension of the buffer uses indirect (suboffset-based) addressing.
 * Returns Py_None (new reference) on success, NULL with an exception set on
 * failure. `__pyx_tuple__19` is the module-level pre-built argument tuple
 * holding the error-message string.
 *
 * NOTE(review): this file is Cython output — edit the .pyx source, not this C.
 */
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
  Py_ssize_t __pyx_v_suboffset;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  Py_ssize_t *__pyx_t_1;
  Py_ssize_t *__pyx_t_2;
  Py_ssize_t *__pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
  /* "View.MemoryView":659: for suboffset in suboffsets[:ndim]: */
  __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim);
  for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) {
    __pyx_t_1 = __pyx_t_3;
    __pyx_v_suboffset = (__pyx_t_1[0]);
    /* "View.MemoryView":660: if suboffset >= 0: */
    __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0);
    if (__pyx_t_4) {
      /* "View.MemoryView":661: raise ValueError("Indirect dimensions not supported") */
      __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_5);
      __Pyx_Raise(__pyx_t_5, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    }
  }
  /* function exit code: success path returns None (new ref). */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":668
 * @cname('__pyx_memview_slice')
 * cdef memoryview memview_slice(memoryview memview, object
indices): # <<<<<<<<<<<<<<
 * cdef int new_ndim = 0, suboffset_dim = -1, dim
 * cdef bint negative_step
 *
 * Cython-generated implementation of View.MemoryView.memview_slice: builds a
 * new memoryview object from `memview` restricted by the index tuple
 * `indices` (integers, slices, or None/newaxis entries). Returns a new
 * reference, or 0 (NULL) with an exception set on error.
 * NOTE(review): this file is Cython output — edit the .pyx source, not this C.
 */
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
  int __pyx_v_new_ndim;
  int __pyx_v_suboffset_dim;
  int __pyx_v_dim;
  __Pyx_memviewslice __pyx_v_src;
  __Pyx_memviewslice __pyx_v_dst;
  __Pyx_memviewslice *__pyx_v_p_src;
  struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
  __Pyx_memviewslice *__pyx_v_p_dst;
  int *__pyx_v_p_suboffset_dim;
  Py_ssize_t __pyx_v_start;
  Py_ssize_t __pyx_v_stop;
  Py_ssize_t __pyx_v_step;
  int __pyx_v_have_start;
  int __pyx_v_have_stop;
  int __pyx_v_have_step;
  PyObject *__pyx_v_index = NULL;
  struct __pyx_memoryview_obj *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  struct __pyx_memoryview_obj *__pyx_t_4;
  char *__pyx_t_5;
  int __pyx_t_6;
  Py_ssize_t __pyx_t_7;
  PyObject *(*__pyx_t_8)(PyObject *);
  PyObject *__pyx_t_9 = NULL;
  Py_ssize_t __pyx_t_10;
  int __pyx_t_11;
  Py_ssize_t __pyx_t_12;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("memview_slice", 0);
  /* "View.MemoryView":669: cdef int new_ndim = 0, suboffset_dim = -1, dim */
  __pyx_v_new_ndim = 0;
  __pyx_v_suboffset_dim = -1;
  /* "View.MemoryView":676: memset(&dst, 0, sizeof(dst)) */
  memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)));
  /* "View.MemoryView":680: assert memview.view.ndim > 0 */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(!Py_OptimizeFlag)) {
    if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) {
      PyErr_SetNone(PyExc_AssertionError);
      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 680; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    }
  }
  #endif
  /* "View.MemoryView":682: if isinstance(memview, _memoryviewslice):
   * Pick the source slice: reuse the wrapped slice if `memview` is already a
   * _memoryviewslice, otherwise copy its buffer description into `src`. */
  __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
  __pyx_t_2 = (__pyx_t_1 != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":683-684: memviewsliceobj = memview; p_src = &memviewsliceobj.from_slice */
    if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 683; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __pyx_t_3 = ((PyObject *)__pyx_v_memview);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
    __pyx_t_3 = 0;
    __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
    goto __pyx_L3;
  }
  /*else*/ {
    /* "View.MemoryView":686-687: slice_copy(memview, &src); p_src = &src */
    __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
    __pyx_v_p_src = (&__pyx_v_src);
  }
  __pyx_L3:;
  /* "View.MemoryView":693-694: dst.memview = p_src.memview; dst.data = p_src.data */
  __pyx_t_4 = __pyx_v_p_src->memview;
  __pyx_v_dst.memview = __pyx_t_4;
  __pyx_t_5 = __pyx_v_p_src->data;
  __pyx_v_dst.data = __pyx_t_5;
  /* "View.MemoryView":699-700: p_dst = &dst; p_suboffset_dim = &suboffset_dim */
  __pyx_v_p_dst = (&__pyx_v_dst);
  __pyx_v_p_suboffset_dim = (&__pyx_v_p_suboffset_dim[0]) == (&__pyx_v_suboffset_dim) ? (&__pyx_v_suboffset_dim) : (&__pyx_v_suboffset_dim);
  /* NOTE(review): line above must remain exactly `__pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);` */
  __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
  /* "View.MemoryView":704: for dim, index in enumerate(indices):
   * Fast path for list/tuple; generic iterator protocol otherwise. */
  __pyx_t_6 = 0;
  if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) {
    __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0;
    __pyx_t_8 = NULL;
  } else {
    __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  for (;;) {
    if (likely(!__pyx_t_8)) {
      if (likely(PyList_CheckExact(__pyx_t_3))) {
        if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
        #if CYTHON_COMPILING_IN_CPYTHON
        __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        #else
        __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        __Pyx_GOTREF(__pyx_t_9);
        #endif
      } else {
        if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
        #if CYTHON_COMPILING_IN_CPYTHON
        __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        #else
        __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        __Pyx_GOTREF(__pyx_t_9);
        #endif
      }
    } else {
      __pyx_t_9 = __pyx_t_8(__pyx_t_3);
      if (unlikely(!__pyx_t_9)) {
        PyObject* exc_type = PyErr_Occurred();
        if (exc_type) {
          if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
          else {__pyx_filename = __pyx_f[1]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        }
        break;
      }
      __Pyx_GOTREF(__pyx_t_9);
    }
    __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9);
    __pyx_t_9 = 0;
    __pyx_v_dim = __pyx_t_6;
    __pyx_t_6 = (__pyx_t_6 + 1);
    /* "View.MemoryView":705: if PyIndex_Check(index):
     * Integer index: slice this dimension down to a single element. */
    __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0);
    if (__pyx_t_2) {
      /* "View.MemoryView":709: index, 0, 0,  # start, stop, step */
      __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      /* "View.MemoryView":706: slice_memviewslice( ... is_slice=False) */
      __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      goto __pyx_L6;
    }
    /* "View.MemoryView":712: elif index is None:
     * None (newaxis): insert a broadcast dimension of extent 1. */
    __pyx_t_2 = (__pyx_v_index == Py_None);
    __pyx_t_1 = (__pyx_t_2 != 0);
    if (__pyx_t_1) {
      /* "View.MemoryView":713-716: shape=1, stride=0, suboffset=-1; new_ndim += 1 */
      (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
      (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
      (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L;
      __pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
      goto __pyx_L6;
    }
    /* "View.MemoryView":718: start = index.start or 0
     * Slice object: extract start/stop/step (0 when falsy) and the
     * have_{start,stop,step} flags (attribute is not None). */
    /*else*/ {
      __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_9);
      __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      if (!__pyx_t_1) {
        __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      } else {
        __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        __pyx_t_10 = __pyx_t_12;
        __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
        goto __pyx_L7_bool_binop_done;
      }
      __pyx_t_10 = 0;
      __pyx_L7_bool_binop_done:;
      __pyx_v_start = __pyx_t_10;
      /* "View.MemoryView":719: stop = index.stop or 0 */
      __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_9);
      __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      if (!__pyx_t_1) {
        __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      } else {
        __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        __pyx_t_10 = __pyx_t_12;
        __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
        goto __pyx_L9_bool_binop_done;
      }
      __pyx_t_10 = 0;
      __pyx_L9_bool_binop_done:;
      __pyx_v_stop = __pyx_t_10;
      /* "View.MemoryView":720: step = index.step or 0 */
      __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 720; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_9);
      __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 720; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      if (!__pyx_t_1) {
        __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      } else {
        __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 720; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        __pyx_t_10 = __pyx_t_12;
        __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
        goto __pyx_L11_bool_binop_done;
      }
      __pyx_t_10 = 0;
      __pyx_L11_bool_binop_done:;
      __pyx_v_step = __pyx_t_10;
      /* "View.MemoryView":722: have_start = index.start is not None */
      __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 722; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_9);
      __pyx_t_1 = (__pyx_t_9 != Py_None);
      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      __pyx_v_have_start = __pyx_t_1;
      /* "View.MemoryView":723: have_stop = index.stop is not None */
      __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_9);
      __pyx_t_1 = (__pyx_t_9 != Py_None);
      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      __pyx_v_have_stop = __pyx_t_1;
      /* "View.MemoryView":724: have_step = index.step is not None */
      __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 724; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_9);
      __pyx_t_1 = (__pyx_t_9 != Py_None);
      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      __pyx_v_have_step = __pyx_t_1;
      /* "View.MemoryView":726: slice_memviewslice( ... is_slice=True) */
      __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 726; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      /* "View.MemoryView":732: new_ndim += 1 */
      __pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
    }
    __pyx_L6:;
  }
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* "View.MemoryView":734: if isinstance(memview, _memoryviewslice):
   * Wrap the computed dst slice, preserving the conversion callbacks when the
   * source was already a _memoryviewslice. */
  __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
  __pyx_t_2 = (__pyx_t_1 != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":735: return memoryview_fromslice(dst, new_ndim, ...) */
    __Pyx_XDECREF(((PyObject *)__pyx_r));
    if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 736; __pyx_clineno = __LINE__; goto __pyx_L1_error;} }
    if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 737; __pyx_clineno = __LINE__; goto __pyx_L1_error;} }
    __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 735; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 735; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
    __pyx_t_3 = 0;
    goto __pyx_L0;
  }
  /* "View.MemoryView":740: return memoryview_fromslice(dst, new_ndim, NULL, NULL, ...) */
  /*else*/ {
    __Pyx_XDECREF(((PyObject *)__pyx_r));
    __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 740; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 740; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
    __pyx_t_3 = 0;
    goto __pyx_L0;
  }
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
  __Pyx_XDECREF(__pyx_v_index);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":765
 * @cname('__pyx_memoryview_slice_memviewslice')
 * cdef int slice_memviewslice(
 *     __Pyx_memviewslice *dst,
 *     Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, ... */
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int
__pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":785 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":787 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":788 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":787 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":789 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":790 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, __pyx_k_Index_out_of_bounds_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":789 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, 
"Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":785 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":793 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":795 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":796 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, __pyx_k_Step_may_not_be_zero_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":795 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":799 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":800 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":801 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * 
start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":802 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":803 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":802 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":800 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":804 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":805 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":806 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":805 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":808 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* "View.MemoryView":804 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":799 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":810 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* 
"View.MemoryView":811 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":810 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":813 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":815 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":816 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":817 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":818 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":819 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":818 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":816 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":820 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":821 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":820 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":815 * 
start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":823 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":824 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":823 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":826 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":828 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":829 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":828 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":833 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":835 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":836 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":835 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":838 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if 
(__pyx_t_2) { /* "View.MemoryView":839 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":838 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":842 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":843 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":844 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":847 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":848 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":847 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":850 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":852 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = 
((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":853 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":854 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":855 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":854 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":857 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":858 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, __pyx_k_All_dimensions_preceding_dimensi, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 857; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L26:; /* "View.MemoryView":853 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":860 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = 
__pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":852 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":862 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":765 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":868 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":870 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":871 * Py_ssize_t dim) except NULL: * cdef Py_ssize_t 
shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":874 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":875 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 875; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 875; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); /* "View.MemoryView":876 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":874 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":878 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":879 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":880 * shape = 
view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":881 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":880 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":883 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":884 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":885 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":886 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":885 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":883 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":888 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":889 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 889; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 889; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 889; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 889; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 889; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":888 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":891 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":892 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":893 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":892 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":895 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":868 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # 
<<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":901 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":902 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":904 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":905 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":909 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); for (__pyx_t_1 = 0; __pyx_t_1 
< __pyx_t_3; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":910 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":911 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_4 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_5 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_4; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":912 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_5 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_4 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_5; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_4; /* "View.MemoryView":914 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L6_bool_binop_done; } __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L6_bool_binop_done:; if (__pyx_t_6) { /* "View.MemoryView":915 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_8 = __pyx_memoryview_err(__pyx_builtin_ValueError, __pyx_k_Cannot_transpose_memoryview_with); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 915; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* 
"View.MemoryView":914 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* "View.MemoryView":917 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":901 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":934 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":935 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ 
__PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":934 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":937 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":938 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":939 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 939; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":938 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":941 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object 
value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 941; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":937 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":943 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":944 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":945 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, 
itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 945; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":944 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":947 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * property base: */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 947; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":943 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":951 * property base: * @cname('__pyx_memoryviewslice__get__base') * def __get__(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = 
__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":952 * @cname('__pyx_memoryviewslice__get__base') * def __get__(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":951 * property base: * @cname('__pyx_memoryviewslice__get__base') * def __get__(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":958 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":966 
* cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":967 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; /* "View.MemoryView":966 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":972 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 972; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 972; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 972; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":974 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":975 * * result.from_slice = 
memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":977 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 977; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":978 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":980 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":981 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":982 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* 
"View.MemoryView":983 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":984 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * result.flags = PyBUF_RECORDS */ Py_INCREF(Py_None); /* "View.MemoryView":986 * Py_INCREF(Py_None) * * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":988 * result.flags = PyBUF_RECORDS * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":989 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":992 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":993 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":994 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if 
suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":995 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":996 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L5_break; /* "View.MemoryView":994 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L5_break:; /* "View.MemoryView":998 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":999 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 999; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1000 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = 
to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1000; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1000; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1000; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1002 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1003 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1005 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":958 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); 
__Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1008 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1011 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1012 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1012; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1013 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1011 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # 
<<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1015 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1016 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1008 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 0, 0); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1019 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1023 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1024 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = 
__pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1025 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1027 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1028 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1030 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_dim = __pyx_t_3; /* "View.MemoryView":1031 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1032 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1033 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_4 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_4 = 
-1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_4; } /* "View.MemoryView":1019 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1036 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1039 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1040 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1040; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1036 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; 
__Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1043 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1050 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1051 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1052 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func 
# <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1050 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1054 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1055 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1057 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1059 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1057; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1043 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1065 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1066 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1067 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1066 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1069 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1065 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1072 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1077 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1078 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1080 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1L; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1081 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1082 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1083 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1081 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1085 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_1; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1086 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ __pyx_t_2 = 
(((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1087 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1088 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1086 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1090 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1091 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1090 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1093 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1072 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1096 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; /* "View.MemoryView":1103 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1104 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1105 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1106 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1108 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if 
(src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1109 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1110 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1109 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1111 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)); /* "View.MemoryView":1109 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1113 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for i in range(dst_extent): # 
<<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1114 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize); /* "View.MemoryView":1115 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1116 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1108 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1118 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1119 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1123 * src_shape + 1, dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * 
dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1124 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1096 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1126 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1129 * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1126 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1133 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_size; Py_ssize_t 
__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1136 * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i * cdef Py_ssize_t size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1138 * cdef Py_ssize_t size = src.memview.view.itemsize * * for i in range(ndim): # <<<<<<<<<<<<<< * size *= src.shape[i] * */ __pyx_t_2 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1139 * * for i in range(ndim): * size *= src.shape[i] # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * (__pyx_v_src->shape[__pyx_v_i])); } /* "View.MemoryView":1141 * size *= src.shape[i] * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1133 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1144 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1153 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1154 * * if order == 'F': * for idx in 
range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ __pyx_t_2 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_idx = __pyx_t_3; /* "View.MemoryView":1155 * if order == 'F': * for idx in range(ndim): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1156 * for idx in range(ndim): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1153 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1158 * stride = stride * shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1L; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1159 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1160 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1162 * stride = stride * shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1144 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* 
"View.MemoryView":1165 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1176 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1177 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1179 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1180 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1181 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":1180 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* "View.MemoryView":1184 * * * tmpslice.data = <char 
*> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1185 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = __pyx_t_4; /* "View.MemoryView":1186 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1187 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1188 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1190 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ __pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order); /* "View.MemoryView":1194 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1195 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* 
"View.MemoryView":1196 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src, order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1195 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* "View.MemoryView":1198 * tmpslice.strides[i] = 0 * * if slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1199 * * if slice_is_contig(src, order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size); /* "View.MemoryView":1198 * tmpslice.strides[i] = 0 * * if slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1201 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1203 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":1165 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; 
} /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1211 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* 
"View.MemoryView":1210 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1210; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1210; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1210; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1210; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1214 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # 
<<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1215 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (!__pyx_t_2) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if 
(unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); } else { __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":1214 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1218 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; 
PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1219 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":1220 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } if (!__pyx_t_5) { __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; __Pyx_GIVEREF(__pyx_t_3); 
PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":1219 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1222 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":1218 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1225 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, 
int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; void *__pyx_t_6; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1233 * Check for overlapping memory and verify the shapes. * """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1234 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1236 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1237 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1238 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1241 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1242 * * if src_ndim < dst_ndim: * 
broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1241 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1243 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1244 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1243 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1246 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1248 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_5; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1249 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = 
(((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); if (__pyx_t_2) { /* "View.MemoryView":1250 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1251 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1252 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1250 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1254 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_4 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1254; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L7:; /* "View.MemoryView":1249 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1256 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1257 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_4 = 
__pyx_memoryview_err_dim(__pyx_builtin_ValueError, __pyx_k_Dimension_d_is_not_direct, __pyx_v_i); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":1256 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1259 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(&src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1261 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(&src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig((&__pyx_v_src), __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1262 * * if not slice_is_contig(&src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1261 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(&src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1264 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_6 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1264; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_tmpdata = __pyx_t_6; /* "View.MemoryView":1265 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp 
# <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1259 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(&src, order, ndim): */ } /* "View.MemoryView":1267 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1270 * * * if slice_is_contig(&src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(&dst, 'C', ndim) * elif slice_is_contig(&src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig((&__pyx_v_src), 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1271 * * if slice_is_contig(&src, 'C', ndim): * direct_copy = slice_is_contig(&dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(&src, 'F', ndim): * direct_copy = slice_is_contig(&dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig((&__pyx_v_dst), 'C', __pyx_v_ndim); /* "View.MemoryView":1270 * * * if slice_is_contig(&src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(&dst, 'C', ndim) * elif slice_is_contig(&src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1272 * if slice_is_contig(&src, 'C', ndim): * direct_copy = slice_is_contig(&dst, 'C', ndim) * elif slice_is_contig(&src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(&dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig((&__pyx_v_src), 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1273 * direct_copy = slice_is_contig(&dst, 'C', ndim) * elif slice_is_contig(&src, 'F', ndim): * direct_copy = slice_is_contig(&dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig((&__pyx_v_dst), 'F', __pyx_v_ndim); /* "View.MemoryView":1272 * if slice_is_contig(&src, 'C', ndim): * direct_copy = slice_is_contig(&dst, 'C', ndim) * elif slice_is_contig(&src, 'F', ndim): # 
<<<<<<<<<<<<<< * direct_copy = slice_is_contig(&dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1275 * direct_copy = slice_is_contig(&dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1277 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1278 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)); /* "View.MemoryView":1279 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1280 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1281 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1275 * direct_copy = slice_is_contig(&dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1267 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* 
"View.MemoryView":1283 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_7 = (__pyx_t_2 != 0); if (__pyx_t_7) { /* "View.MemoryView":1286 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":1287 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1287; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":1283 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1289 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1290 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1291 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* 
"View.MemoryView":1293 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1294 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1225 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1297 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; /* "View.MemoryView":1301 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1303 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1L; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1304 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ 
(__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1305 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1306 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1308 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":1309 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1310 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1311 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1297 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1319 * * @cname('__pyx_memoryview_refcount_copying') * cdef void 
refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1323 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1324 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1323 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1319 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1328 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1331 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ 
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1328 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1334 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; int __pyx_t_3; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1338 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":1339 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_3 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_3) { /* "View.MemoryView":1340 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_3 = (__pyx_v_inc != 0); if (__pyx_t_3) { /* "View.MemoryView":1341 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1340 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * 
Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1343 * Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1339 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1345 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1346 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1348 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1334 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1354 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1357 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, 
dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1358 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1360 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1354 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1364 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; /* "View.MemoryView":1368 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1369 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # 
<<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1371 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1372 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1373 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize); /* "View.MemoryView":1374 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1371 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1376 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1377 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1379 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1364 * * 
@cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) { Py_DECREF(o); o = 0; } return o; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_array___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return 
v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return get_memview(o); } static PyMethodDef __pyx_methods_array[] = { {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, 0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { 0, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { 0, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "fuel.transformers._image.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, 
/*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "fuel.transformers._image.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), 
/*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) { Py_DECREF(o); o = 0; } return o; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct 
__pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryview___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_transpose(o); } static PyObject 
*__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview__get__base(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_shape(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_strides(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_suboffsets(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_ndim(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_itemsize(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_nbytes(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_size(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, 0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, 0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, 0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, 0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, 0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, 0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, 0, 0}, {(char *)"nbytes", 
__pyx_getprop___pyx_memoryview_nbytes, 0, 0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, 0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "fuel.transformers._image.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, 
/*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryviewslice___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; 
/* NOTE(review): Cython-GENERATED code — comments added for navigation only.
 * Continuation of __pyx_tp_clear__memoryviewslice: drop from_object (swap in
 * None before decref, the standard GC-safe clear pattern) and release the
 * underlying memoryview slice. */
  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
  __pyx_tp_clear_memoryview(o);
  tmp = ((PyObject*)p->from_object);
  p->from_object = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  __PYX_XDEC_MEMVIEW(&p->from_slice, 1);
  return 0;
}

/* Getter for the `base` property of _memoryviewslice. */
static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryviewslice__get__base(o);
}

static PyMethodDef __pyx_methods__memoryviewslice[] = {
  {0, 0, 0, 0}
};

static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
  {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, 0, 0},
  {0, 0, 0, 0, 0}
};

/* Type object for _memoryviewslice; tp_base is patched to the memoryview
 * type at module-init time, so inherited slots are left 0 here. */
static PyTypeObject __pyx_type___pyx_memoryviewslice = {
  PyVarObject_HEAD_INIT(0, 0)
  "fuel.transformers._image._memoryviewslice", /*tp_name*/
  sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
  0, /*tp_print*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #endif
  #if PY_MAJOR_VERSION >= 3
  0, /*tp_as_async*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY
  __pyx_memoryview___repr__, /*tp_repr*/
  #else
  0, /*tp_repr*/
  #endif
  0, /*tp_as_number*/
  0, /*tp_as_sequence*/
  0, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  #if CYTHON_COMPILING_IN_PYPY
  __pyx_memoryview___str__, /*tp_str*/
  #else
  0, /*tp_str*/
  #endif
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  "Internal class for passing memoryview slices to Python", /*tp_doc*/
  __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
  __pyx_tp_clear__memoryviewslice, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods__memoryviewslice, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets__memoryviewslice, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  0, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new__memoryviewslice, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
};

/* The module exposes no directly-defined C methods; the fused
 * window_batch_bchw entry points are registered at init time instead. */
static PyMethodDef __pyx_methods[] = {
  {0, 0, 0, 0}
};

#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef __pyx_moduledef = {
  #if PY_VERSION_HEX < 0x03020000
  { PyObject_HEAD_INIT(NULL) NULL, 0, NULL },
  #else
  PyModuleDef_HEAD_INIT,
  #endif
  "_image",
  0, /* m_doc */
  -1, /* m_size */
  __pyx_methods /* m_methods */,
  NULL, /* m_reload */
  NULL, /* m_traverse */
  NULL, /* m_clear */
  NULL /* m_free */
};
#endif

/* Interned-string table; filled in by __Pyx_InitStrings at module init.
 * (Continues on the following source lines.) */
static __Pyx_StringTabEntry __pyx_string_tab[] = {
  {&__pyx_kp_s_, __pyx_k_, sizeof(__pyx_k_), 0, 0, 1, 0},
  {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1},
  {&__pyx_n_s_AttributeError, __pyx_k_AttributeError, sizeof(__pyx_k_AttributeError), 0, 0, 1, 1},
  {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0},
  {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0},
  {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0},
  {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1},
  {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0},
  {&__pyx_kp_s_Expected_at_least_d_arguments, __pyx_k_Expected_at_least_d_arguments, sizeof(__pyx_k_Expected_at_least_d_arguments), 0, 0, 1, 0},
  {&__pyx_kp_s_Function_call_with_ambiguous_arg, __pyx_k_Function_call_with_ambiguous_arg, sizeof(__pyx_k_Function_call_with_ambiguous_arg), 0, 0, 1, 0},
  {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1},
/* NOTE(review): Cython-GENERATED string-intern table continuation —
 * comments added for navigation only; entries are unchanged. Each row:
 * {slot, C literal, sizeof literal, encoding/is-unicode/is-str/intern flags}. */
  {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
  {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0},
  {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0},
  {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0},
  {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
  {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0},
  {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0},
  {&__pyx_kp_s_No_matching_signature_found, __pyx_k_No_matching_signature_found, sizeof(__pyx_k_No_matching_signature_found), 0, 0, 1, 0},
  {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1},
  {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0},
  {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
  {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0},
  {&__pyx_kp_s_Users_bartvm_fuel_fuel_transfor, __pyx_k_Users_bartvm_fuel_fuel_transfor, sizeof(__pyx_k_Users_bartvm_fuel_fuel_transfor), 0, 0, 1, 0},
  {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
  {&__pyx_kp_s__3, __pyx_k__3, sizeof(__pyx_k__3), 0, 0, 1, 0},
  {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1},
  {&__pyx_n_s_args, __pyx_k_args, sizeof(__pyx_k_args), 0, 0, 1, 1},
  {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1},
  {&__pyx_n_s_batch, __pyx_k_batch, sizeof(__pyx_k_batch), 0, 0, 1, 1},
  {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
  {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1},
  {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1},
  {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0},
  {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0},
  {&__pyx_n_s_defaults, __pyx_k_defaults, sizeof(__pyx_k_defaults), 0, 0, 1, 1},
  {&__pyx_n_s_double, __pyx_k_double, sizeof(__pyx_k_double), 0, 0, 1, 1},
  {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1},
  {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1},
  {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1},
  {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
  {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1},
  {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1},
  {&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1},
  {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
  {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1},
  {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1},
  {&__pyx_n_s_fuel_transformers__image, __pyx_k_fuel_transformers__image, sizeof(__pyx_k_fuel_transformers__image), 0, 0, 1, 1},
  {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0},
  {&__pyx_n_s_height_offsets, __pyx_k_height_offsets, sizeof(__pyx_k_height_offsets), 0, 0, 1, 1},
  {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
  {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
  {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1},
  {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0},
  {&__pyx_n_s_kind, __pyx_k_kind, sizeof(__pyx_k_kind), 0, 0, 1, 1},
  {&__pyx_n_s_kwargs, __pyx_k_kwargs, sizeof(__pyx_k_kwargs), 0, 0, 1, 1},
  {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
  {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1},
  {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1},
  {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
  {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
  {&__pyx_n_s_ndarray, __pyx_k_ndarray, sizeof(__pyx_k_ndarray), 0, 0, 1, 1},
  {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1},
  {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
  {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1},
  {&__pyx_n_s_out, __pyx_k_out, sizeof(__pyx_k_out), 0, 0, 1, 1},
  {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_fuse_0window_batch_bchw, __pyx_k_pyx_fuse_0window_batch_bchw, sizeof(__pyx_k_pyx_fuse_0window_batch_bchw), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_fuse_1window_batch_bchw, __pyx_k_pyx_fuse_1window_batch_bchw, sizeof(__pyx_k_pyx_fuse_1window_batch_bchw), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_fuse_2window_batch_bchw, __pyx_k_pyx_fuse_2window_batch_bchw, sizeof(__pyx_k_pyx_fuse_2window_batch_bchw), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
  {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
  {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
  {&__pyx_n_s_signatures, __pyx_k_signatures, sizeof(__pyx_k_signatures), 0, 0, 1, 1},
  {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
  {&__pyx_n_s_split, __pyx_k_split, sizeof(__pyx_k_split), 0, 0, 1, 1},
  {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1},
  {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1},
  {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1},
  {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0},
  {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0},
  {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0},
  {&__pyx_n_s_strip, __pyx_k_strip, sizeof(__pyx_k_strip), 0, 0, 1, 1},
  {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1},
  {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
  {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0},
  {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0},
  {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1},
  {&__pyx_kp_s_unsigned_char, __pyx_k_unsigned_char, sizeof(__pyx_k_unsigned_char), 0, 0, 1, 0},
  {&__pyx_n_s_width_offsets, __pyx_k_width_offsets, sizeof(__pyx_k_width_offsets), 0, 0, 1, 1},
  {&__pyx_n_s_window_batch_bchw, __pyx_k_window_batch_bchw, sizeof(__pyx_k_window_batch_bchw), 0, 0, 1, 1},
  {&__pyx_n_s_zip, __pyx_k_zip, sizeof(__pyx_k_zip), 0, 0, 1, 1},
  {0, 0, 0, 0, 0, 0, 0}
};

/* Resolve the builtins used by the generated code once, at module init.
 * The __pyx_lineno values refer to the originating .pyx / View.MemoryView
 * source lines. (Function continues on the next source line.) */
static int __Pyx_InitCachedBuiltins(void) {
  __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_AttributeError = __Pyx_GetBuiltinName(__pyx_n_s_AttributeError); if (!__pyx_builtin_AttributeError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* NOTE(review): Cython-GENERATED code — comments added for navigation only.
 * Continuation of __Pyx_InitCachedBuiltins. */
  __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_zip = __Pyx_GetBuiltinName(__pyx_n_s_zip); if (!__pyx_builtin_zip) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 359; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  return 0;
  __pyx_L1_error:;
  return -1;
}

/* Build the constant tuples/slices/code object used by the module
 * (argument tuples for cached exceptions, the window_batch_bchw code
 * object, and the Enum labels for memoryview access modes). */
static int __Pyx_InitCachedConstants(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);

  /* "fuel/transformers/_image.pyx":15
 * @cython.boundscheck(False)
 * @cython.wraparound(False)
 * cpdef window_batch_bchw(image_dtype[:, :, :, :] batch,             # <<<<<<<<<<<<<<
 *                         long[:] height_offsets, long[:] width_offsets,
 *                         image_dtype[:, :, :, :] out):
 */
  __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_); if (unlikely(!__pyx_tuple__2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_tuple__2);
  __Pyx_GIVEREF(__pyx_tuple__2);
  __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s__3); if (unlikely(!__pyx_tuple__4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_tuple__4);
  __Pyx_GIVEREF(__pyx_tuple__4);
  __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_No_matching_signature_found); if (unlikely(!__pyx_tuple__5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_tuple__5);
  __Pyx_GIVEREF(__pyx_tuple__5);
  __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Function_call_with_ambiguous_arg); if (unlikely(!__pyx_tuple__6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_tuple__6);
  __Pyx_GIVEREF(__pyx_tuple__6);

  /* "View.MemoryView":129
 *
 *         if not self.ndim:
 *             raise ValueError("Empty shape tuple for cython.array")             # <<<<<<<<<<<<<<
 *
 *         if itemsize <= 0:
 */
  __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_tuple__7);
  __Pyx_GIVEREF(__pyx_tuple__7);

  /* "View.MemoryView":132
 *
 *         if itemsize <= 0:
 *             raise ValueError("itemsize <= 0 for cython.array")             # <<<<<<<<<<<<<<
 *
 *         if not isinstance(format, bytes):
 */
  __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_tuple__8);
  __Pyx_GIVEREF(__pyx_tuple__8);

  /* "View.MemoryView":135
 *
 *         if not isinstance(format, bytes):
 *             format = format.encode('ASCII')             # <<<<<<<<<<<<<<
 *         self._format = format  # keep a reference to the byte string
 *         self.format = self._format
 */
  __pyx_tuple__9 = PyTuple_Pack(1, __pyx_n_s_ASCII); if (unlikely(!__pyx_tuple__9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_tuple__9);
  __Pyx_GIVEREF(__pyx_tuple__9);

  /* "View.MemoryView":144
 *
 *         if not self._shape:
 *             raise MemoryError("unable to allocate shape and strides.")             # <<<<<<<<<<<<<<
 *
 *
 */
  __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_tuple__10);
  __Pyx_GIVEREF(__pyx_tuple__10);

  /* "View.MemoryView":172
 *             self.data = <char *>malloc(self.len)
 *             if not self.data:
 *                 raise MemoryError("unable to allocate array data.")             # <<<<<<<<<<<<<<
 *
 *             if self.dtype_is_object:
 */
  __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__11)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_tuple__11);
  __Pyx_GIVEREF(__pyx_tuple__11);

  /* "View.MemoryView":188
 *             bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *         if not (flags & bufmode):
 *             raise ValueError("Can only create a buffer that is contiguous in memory.")             # <<<<<<<<<<<<<<
 *         info.buf = self.data
 *         info.len = self.len
 */
  __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_tuple__12);
  __Pyx_GIVEREF(__pyx_tuple__12);

  /* "View.MemoryView":447
 *             result = struct.unpack(self.view.format, bytesitem)
 *         except struct.error:
 *             raise ValueError("Unable to convert item to object")             # <<<<<<<<<<<<<<
 *         else:
 *             if len(self.view.format) == 1:
 */
  __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__13)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 447; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_tuple__13);
  __Pyx_GIVEREF(__pyx_tuple__13);

  /* "View.MemoryView":523
 *             if self.view.strides == NULL:
 *
 *                 raise ValueError("Buffer view does not expose strides")             # <<<<<<<<<<<<<<
 *
 *             return tuple([stride for stride in self.view.strides[:self.view.ndim]])
 */
  __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__14)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_tuple__14);
  __Pyx_GIVEREF(__pyx_tuple__14);

  /* "View.MemoryView":531
 *     def __get__(self):
 *         if self.view.suboffsets == NULL:
 *             return (-1,) * self.view.ndim             # <<<<<<<<<<<<<<
 *
 *         return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
 */
  __pyx_tuple__15 = PyTuple_New(1); if (unlikely(!__pyx_tuple__15)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_tuple__15);
  __Pyx_INCREF(__pyx_int_neg_1);
  __Pyx_GIVEREF(__pyx_int_neg_1);
  PyTuple_SET_ITEM(__pyx_tuple__15, 0, __pyx_int_neg_1);
  __Pyx_GIVEREF(__pyx_tuple__15);

  /* "View.MemoryView":640
 *         if item is Ellipsis:
 *             if not seen_ellipsis:
 *                 result.extend([slice(None)] * (ndim - len(tup) + 1))             # <<<<<<<<<<<<<<
 *                 seen_ellipsis = True
 *             else:
 */
  __pyx_slice__16 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__16)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 640; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_slice__16);
  __Pyx_GIVEREF(__pyx_slice__16);

  /* "View.MemoryView":643
 *                 seen_ellipsis = True
 *             else:
 *                 result.append(slice(None))             # <<<<<<<<<<<<<<
 *                 have_slices = True
 *         else:
 */
  __pyx_slice__17 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__17)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_slice__17);
  __Pyx_GIVEREF(__pyx_slice__17);

  /* "View.MemoryView":654
 *         nslices = ndim - len(result)
 *         if nslices:
 *             result.extend([slice(None)] * nslices)             # <<<<<<<<<<<<<<
 *
 *         return have_slices or nslices, tuple(result)
 */
  __pyx_slice__18 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__18)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_slice__18);
  __Pyx_GIVEREF(__pyx_slice__18);

  /* "View.MemoryView":661
 *     for suboffset in suboffsets[:ndim]:
 *         if suboffset >= 0:
 *             raise ValueError("Indirect dimensions not supported")             # <<<<<<<<<<<<<<
 *
 *
 */
  __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__19)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_tuple__19);
  __Pyx_GIVEREF(__pyx_tuple__19);

  /* "fuel/transformers/_image.pyx":15
 * @cython.boundscheck(False)
 * @cython.wraparound(False)
 * cpdef window_batch_bchw(image_dtype[:, :, :, :] batch,             # <<<<<<<<<<<<<<
 *                         long[:] height_offsets, long[:] width_offsets,
 *                         image_dtype[:, :, :, :] out):
 */
  __pyx_tuple__20 = PyTuple_Pack(4, __pyx_n_s_batch, __pyx_n_s_height_offsets, __pyx_n_s_width_offsets, __pyx_n_s_out); if (unlikely(!__pyx_tuple__20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_tuple__20);
  __Pyx_GIVEREF(__pyx_tuple__20);
  __pyx_codeobj__21 = (PyObject*)__Pyx_PyCode_New(4, 0, 4, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__20, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Users_bartvm_fuel_fuel_transfor, __pyx_n_s_pyx_fuse_0window_batch_bchw, 15, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}

  /* "View.MemoryView":278
 *         return self.name
 *
 * cdef generic = Enum("<strided and direct or indirect>")             # <<<<<<<<<<<<<<
 * cdef strided = Enum("<strided and direct>") # default
 * cdef indirect = Enum("<strided and indirect>")
 */
  __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__22)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_tuple__22);
  __Pyx_GIVEREF(__pyx_tuple__22);

  /* "View.MemoryView":279
 *
 * cdef generic = Enum("<strided and direct or indirect>")
 * cdef strided = Enum("<strided and direct>") # default             # <<<<<<<<<<<<<<
 * cdef indirect = Enum("<strided and indirect>")
 *
 */
  __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__23)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_tuple__23);
  __Pyx_GIVEREF(__pyx_tuple__23);

  /* "View.MemoryView":280
 * cdef generic = Enum("<strided and direct or indirect>")
 * cdef strided = Enum("<strided and direct>") # default
 * cdef indirect = Enum("<strided and indirect>")             # <<<<<<<<<<<<<<
 *
 *
 */
  __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__24)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 280; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_tuple__24);
  __Pyx_GIVEREF(__pyx_tuple__24);

  /* "View.MemoryView":283
 *
 *
 * cdef contiguous = Enum("<contiguous and direct>")             # <<<<<<<<<<<<<<
 * cdef indirect_contiguous = Enum("<contiguous and indirect>")
 *
 */
  __pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__25)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_tuple__25);
  __Pyx_GIVEREF(__pyx_tuple__25);

  /* "View.MemoryView":284
 *
 * cdef contiguous = Enum("<contiguous and direct>")
 * cdef indirect_contiguous = Enum("<contiguous and indirect>")             # <<<<<<<<<<<<<<
 *
 *
 */
  __pyx_tuple__26 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__26)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_tuple__26);
  __Pyx_GIVEREF(__pyx_tuple__26);
  __Pyx_RefNannyFinishContext();
  return 0;
  __pyx_L1_error:;
  __Pyx_RefNannyFinishContext();
  return -1;
}

/* Intern the string table and create the small cached int objects. */
static int __Pyx_InitGlobals(void) {
  /* InitThreads.init */
  #ifdef WITH_THREAD
  PyEval_InitThreads();
  #endif
  if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
  __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  return 0;
  __pyx_L1_error:;
  return -1;
}

/* Module entry point (init_image / PyInit__image); the function body
 * continues past the end of this chunk. */
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC init_image(void); /*proto*/
PyMODINIT_FUNC init_image(void)
#else
PyMODINIT_FUNC PyInit__image(void); /*proto*/
PyMODINIT_FUNC PyInit__image(void)
#endif
{
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannyDeclarations
  #if CYTHON_REFNANNY
  __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
  if (!__Pyx_RefNanny) {
    PyErr_Clear();
    __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
    if (!__Pyx_RefNanny)
      Py_FatalError("failed to import 'refnanny' module");
  }
#endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__image(void)", 0); if (__Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? 
*/ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("_image", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. ---*/ if (__Pyx_InitGlobals() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif if (__pyx_module_is_main_fuel__transformers___image) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "fuel.transformers._image")) { if (unlikely(PyDict_SetItemString(modules, "fuel.transformers._image", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ if (PyType_Ready(&__pyx_type___pyx_array) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_type___pyx_array.tp_print = 0; __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_type___pyx_MemviewEnum.tp_print = 0; __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject 
*))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 304; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_type___pyx_memoryview.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 304; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 923; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_type___pyx_memoryviewslice.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 923; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; /*--- Type import code ---*/ /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /* "fuel/transformers/_image.pyx":15 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef window_batch_bchw(image_dtype[:, :, :, :] batch, # <<<<<<<<<<<<<< * long[:] height_offsets, long[:] width_offsets, * image_dtype[:, :, :, :] out): */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_FusedFunction_NewEx(&__pyx_fuse_0__pyx_mdef_4fuel_12transformers_6_image_3__pyx_fuse_0window_batch_bchw, 0, __pyx_n_s_pyx_fuse_0window_batch_bchw, NULL, __pyx_n_s_fuel_transformers__image, __pyx_d, ((PyObject *)__pyx_codeobj__21)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_empty_tuple); if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_float, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __pyx_FusedFunction_NewEx(&__pyx_fuse_1__pyx_mdef_4fuel_12transformers_6_image_5__pyx_fuse_1window_batch_bchw, 0, __pyx_n_s_pyx_fuse_1window_batch_bchw, NULL, __pyx_n_s_fuel_transformers__image, __pyx_d, ((PyObject *)__pyx_codeobj__21)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_empty_tuple); if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_double, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __pyx_FusedFunction_NewEx(&__pyx_fuse_2__pyx_mdef_4fuel_12transformers_6_image_7__pyx_fuse_2window_batch_bchw, 0, 
__pyx_n_s_pyx_fuse_2window_batch_bchw, NULL, __pyx_n_s_fuel_transformers__image, __pyx_d, ((PyObject *)__pyx_codeobj__21)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_empty_tuple); if (PyDict_SetItem(__pyx_t_1, __pyx_kp_s_unsigned_char, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __pyx_FusedFunction_NewEx(&__pyx_mdef_4fuel_12transformers_6_image_1window_batch_bchw, 0, __pyx_n_s_window_batch_bchw, NULL, __pyx_n_s_fuel_transformers__image, __pyx_d, ((PyObject *)__pyx_codeobj__21)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_empty_tuple); ((__pyx_FusedFunctionObject *) __pyx_t_2)->__signatures__ = __pyx_t_1; __Pyx_GIVEREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_window_batch_bchw, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fuel/transformers/_image.pyx":1 * cimport cython # <<<<<<<<<<<<<< * from cython.parallel cimport prange * */ __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":205 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_3 = __pyx_capsule_create(((void 
*)(&__pyx_array_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":278 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":279 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":280 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 280; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":283 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":284 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__26, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 284; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":498 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":954 * return self.from_object * * __pyx_getbuffer = 
capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 954; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 954; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "__pyxutil":2 * * cdef extern from *: # <<<<<<<<<<<<<< * void __pyx_PyErr_Clear "PyErr_Clear" () * __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_float(object) */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init fuel.transformers._image", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init fuel.transformers._image"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* --- Runtime support code --- */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name 
'%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, 
key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_VERSION_HEX < 0x03030000 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if 
(!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_VERSION_HEX < 0x03030000 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); #else PyErr_GetExcInfo(type, value, tb); #endif } static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(type, value, tb); #endif } static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) { PyObject *local_type, *local_value, *local_tb; #if 
CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_COMPILING_IN_CPYTHON tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, 
*tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } 
else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } #if PY_VERSION_HEX >= 0x03030000 if (cause) { #else if (cause && cause != Py_None) { #endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, 
*tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject* x) { Py_ssize_t length; #if CYTHON_PEP393_ENABLED length = PyUnicode_GET_LENGTH(x); if (likely(length == 1)) { return PyUnicode_READ_CHAR(x, 0); } #else length = PyUnicode_GET_SIZE(x); if (likely(length == 1)) { return PyUnicode_AS_UNICODE(x)[0]; } #if Py_UNICODE_SIZE == 2 else if (PyUnicode_GET_SIZE(x) == 2) { Py_UCS4 high_val = PyUnicode_AS_UNICODE(x)[0]; if (high_val >= 0xD800 && high_val <= 0xDBFF) { Py_UCS4 low_val = PyUnicode_AS_UNICODE(x)[1]; if (low_val >= 0xDC00 && low_val <= 0xDFFF) { return 0x10000 + (((high_val & ((1<<10)-1)) << 10) | (low_val & ((1<<10)-1))); } } } #endif #endif PyErr_Format(PyExc_ValueError, "only single character unicode strings can be converted to Py_UCS4, " "got length %" CYTHON_FORMAT_SSIZE_T "d", length); return (Py_UCS4)-1; } static long __Pyx__PyObject_Ord(PyObject* c) { Py_ssize_t size; if (PyBytes_Check(c)) { size = PyBytes_GET_SIZE(c); if (likely(size == 1)) { return (unsigned char) PyBytes_AS_STRING(c)[0]; } #if PY_MAJOR_VERSION < 3 } else if (PyUnicode_Check(c)) { return (long)__Pyx_PyUnicode_AsPy_UCS4(c); #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) } else if (PyByteArray_Check(c)) { size = PyByteArray_GET_SIZE(c); if (likely(size == 1)) { return (unsigned char) PyByteArray_AS_STRING(c)[0]; } #endif } else { PyErr_Format(PyExc_TypeError, "ord() expected string of length 1, but %.200s found", c->ob_type->tp_name); return (long)(Py_UCS4)-1; } PyErr_Format(PyExc_TypeError, "ord() expected a character, but string of length 
%zd found", size); return (long)(Py_UCS4)-1; } static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { int r; if (!j) return -1; r = PyObject_SetItem(o, j, v); Py_DECREF(j); return r; } static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? i : i + PyList_GET_SIZE(o)); if ((!boundscheck) || likely((n >= 0) & (n < PyList_GET_SIZE(o)))) { PyObject* old = PyList_GET_ITEM(o, n); Py_INCREF(v); PyList_SET_ITEM(o, n, v); Py_DECREF(old); return 1; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_ass_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (PyErr_ExceptionMatches(PyExc_OverflowError)) PyErr_Clear(); else return -1; } } return m->sq_ass_item(o, i, v); } } #else #if CYTHON_COMPILING_IN_PYPY if (is_list || (PySequence_Check(o) && !PyDict_Check(o))) { #else if (is_list || PySequence_Check(o)) { #endif return PySequence_SetItem(o, i, v); } #endif return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v); } static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if 
(likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { return __Pyx_PyObject_CallMethO(func, NULL); } } return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); } #endif #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject* args = PyTuple_Pack(1, arg); return 
(likely(args)) ? __Pyx_PyObject_Call(func, args, NULL) : NULL; } #endif static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name) { PyObject *method, *result = NULL; method = __Pyx_PyObject_GetAttrStr(obj, method_name); if (unlikely(!method)) goto bad; #if CYTHON_COMPILING_IN_CPYTHON if (likely(PyMethod_Check(method))) { PyObject *self = PyMethod_GET_SELF(method); if (likely(self)) { PyObject *function = PyMethod_GET_FUNCTION(method); result = __Pyx_PyObject_CallOneArg(function, self); Py_DECREF(method); return result; } } #endif result = __Pyx_PyObject_CallNoArg(method); Py_DECREF(method); bad: return result; } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? "" : "s"); } static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) { if (t == Py_None) { __Pyx_RaiseNoneNotIterableError(); } else if (PyTuple_GET_SIZE(t) < index) { __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t)); } else { __Pyx_RaiseTooManyValuesError(index); } } static CYTHON_INLINE int __Pyx_unpack_tuple2(PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2, int is_tuple, int has_known_size, int decref_tuple) { Py_ssize_t index; PyObject *value1 = NULL, *value2 = NULL, *iter = NULL; if (!is_tuple && unlikely(!PyTuple_Check(tuple))) { iternextfunc iternext; iter = 
PyObject_GetIter(tuple); if (unlikely(!iter)) goto bad; if (decref_tuple) { Py_DECREF(tuple); tuple = NULL; } iternext = Py_TYPE(iter)->tp_iternext; value1 = iternext(iter); if (unlikely(!value1)) { index = 0; goto unpacking_failed; } value2 = iternext(iter); if (unlikely(!value2)) { index = 1; goto unpacking_failed; } if (!has_known_size && unlikely(__Pyx_IternextUnpackEndCheck(iternext(iter), 2))) goto bad; Py_DECREF(iter); } else { if (!has_known_size && unlikely(PyTuple_GET_SIZE(tuple) != 2)) { __Pyx_UnpackTupleError(tuple, 2); goto bad; } #if CYTHON_COMPILING_IN_PYPY value1 = PySequence_ITEM(tuple, 0); if (unlikely(!value1)) goto bad; value2 = PySequence_ITEM(tuple, 1); if (unlikely(!value2)) goto bad; #else value1 = PyTuple_GET_ITEM(tuple, 0); value2 = PyTuple_GET_ITEM(tuple, 1); Py_INCREF(value1); Py_INCREF(value2); #endif if (decref_tuple) { Py_DECREF(tuple); } } *pvalue1 = value1; *pvalue2 = value2; return 0; unpacking_failed: if (!has_known_size && __Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); bad: Py_XDECREF(iter); Py_XDECREF(value1); Py_XDECREF(value2); if (decref_tuple) { Py_XDECREF(tuple); } return -1; } static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* iterable, int is_dict, PyObject* method_name, Py_ssize_t* p_orig_length, int* p_source_is_dict) { is_dict = is_dict || likely(PyDict_CheckExact(iterable)); *p_source_is_dict = is_dict; #if !CYTHON_COMPILING_IN_PYPY if (is_dict) { *p_orig_length = PyDict_Size(iterable); Py_INCREF(iterable); return iterable; } #endif *p_orig_length = 0; if (method_name) { PyObject* iter; iterable = __Pyx_PyObject_CallMethod0(iterable, method_name); if (!iterable) return NULL; #if !CYTHON_COMPILING_IN_PYPY if (PyTuple_CheckExact(iterable) || PyList_CheckExact(iterable)) return iterable; #endif iter = PyObject_GetIter(iterable); Py_DECREF(iterable); return iter; } return PyObject_GetIter(iterable); } static CYTHON_INLINE int __Pyx_dict_iter_next( PyObject* iter_obj, CYTHON_NCP_UNUSED Py_ssize_t 
orig_length, CYTHON_NCP_UNUSED Py_ssize_t* ppos, PyObject** pkey, PyObject** pvalue, PyObject** pitem, int source_is_dict) { PyObject* next_item; #if !CYTHON_COMPILING_IN_PYPY if (source_is_dict) { PyObject *key, *value; if (unlikely(orig_length != PyDict_Size(iter_obj))) { PyErr_SetString(PyExc_RuntimeError, "dictionary changed size during iteration"); return -1; } if (unlikely(!PyDict_Next(iter_obj, ppos, &key, &value))) { return 0; } if (pitem) { PyObject* tuple = PyTuple_New(2); if (unlikely(!tuple)) { return -1; } Py_INCREF(key); Py_INCREF(value); PyTuple_SET_ITEM(tuple, 0, key); PyTuple_SET_ITEM(tuple, 1, value); *pitem = tuple; } else { if (pkey) { Py_INCREF(key); *pkey = key; } if (pvalue) { Py_INCREF(value); *pvalue = value; } } return 1; } else if (PyTuple_CheckExact(iter_obj)) { Py_ssize_t pos = *ppos; if (unlikely(pos >= PyTuple_GET_SIZE(iter_obj))) return 0; *ppos = pos + 1; next_item = PyTuple_GET_ITEM(iter_obj, pos); Py_INCREF(next_item); } else if (PyList_CheckExact(iter_obj)) { Py_ssize_t pos = *ppos; if (unlikely(pos >= PyList_GET_SIZE(iter_obj))) return 0; *ppos = pos + 1; next_item = PyList_GET_ITEM(iter_obj, pos); Py_INCREF(next_item); } else #endif { next_item = PyIter_Next(iter_obj); if (unlikely(!next_item)) { return __Pyx_IterFinish(); } } if (pitem) { *pitem = next_item; } else if (pkey && pvalue) { if (__Pyx_unpack_tuple2(next_item, pkey, pvalue, source_is_dict, source_is_dict, 1)) return -1; } else if (pkey) { *pkey = next_item; } else { *pvalue = next_item; } return 1; } static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname) { #ifdef WITH_THREAD PyGILState_STATE gilstate = PyGILState_Ensure(); #endif __Pyx_RaiseUnboundLocalError(varname); #ifdef WITH_THREAD PyGILState_Release(gilstate); #endif } static CYTHON_INLINE int 
__Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? 
"'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static 
CYTHON_INLINE PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) {
  /* Parse an "(d1,d2,...)" array-dimension suffix in a buffer format string,
   * checking each parsed dimension against the expected dtype's arraysize.
   * On success advances *tsp past the ')' and returns Py_None (borrowed,
   * used only as a non-NULL success flag); returns NULL with a Python
   * exception set on error. */
  const char *ts = *tsp;
  int i = 0, number;
  int ndim = ctx->head->field->type->ndim;
  ++ts;  /* skip the opening '(' */
  if (ctx->new_count != 1) {
    PyErr_SetString(PyExc_ValueError,
                    "Cannot handle repeated arrays in format string");
    return NULL;
  }
  if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
  while (*ts && *ts != ')') {
    switch (*ts) {
      case ' ': case '\f': case '\r': case '\n': case '\t': case '\v':
        /* BUG FIX: must advance ts before re-testing the loop condition.
         * A bare 'continue' here left ts on the whitespace character, so
         * any whitespace inside "(...)" spun this loop forever. */
        ++ts;
        continue;
      default:
        break;  /* fall out of the switch and parse a number */
    }
    number = __Pyx_BufFmt_ExpectNumber(&ts);
    if (number == -1) return NULL;
    if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
      return PyErr_Format(PyExc_ValueError,
                          "Expected a dimension of size %zu, got %d",
                          ctx->head->field->type->arraysize[i], number);
    if (*ts != ',' && *ts != ')')
      return PyErr_Format(PyExc_ValueError,
                          "Expected a comma in format string, got '%c'", *ts);
    if (*ts == ',') ts++;
    i++;
  }
  if (i != ndim)
    return PyErr_Format(PyExc_ValueError,
                        "Expected %d dimension(s), got %d",
                        ctx->head->field->type->ndim, i);
  if (!*ts) {
    PyErr_SetString(PyExc_ValueError,
                    "Unexpected end of format string, expected ')'");
    return NULL;
  }
  ctx->is_valid_array = 1;
  ctx->new_count = 1;
  *tsp = ++ts;
  return Py_None;
}
/* Walk a PEP-3118 format string `ts`, validating it against the dtype held
 * in `ctx`. Returns the position after the consumed portion (or after a
 * closing '}'), or NULL with a Python exception set on mismatch. */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
  int got_Z = 0;  /* saw a 'Z' complex prefix for the next f/d/g */
  while (1) {
    switch(*ts) {
      case 0:
        if (ctx->enc_type != 0 && ctx->head == NULL) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return NULL;
        }
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        if (ctx->head != NULL) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return NULL;
        }
        return ts;
      case ' ':
      case '\r':
      case '\n':
        ++ts;
        break;
      case '<':
        if (!__Pyx_IsLittleEndian()) {
          PyErr_SetString(PyExc_ValueError,
                          "Little-endian buffer not supported on big-endian compiler");
          return NULL;
        }
        ctx->new_packmode = '=';
        ++ts;
        break;
      case '>':
      case '!':
        if (__Pyx_IsLittleEndian()) {
          PyErr_SetString(PyExc_ValueError,
                          "Big-endian buffer not supported on little-endian compiler");
          return NULL;
        }
        ctx->new_packmode = '=';
        ++ts;
        break;
      case '=':
      case '@':
      case '^':
        ctx->new_packmode = *ts++;
        break;
      case 'T': /* substruct: 'T{...}', possibly with a repeat count */
        {
          const char* ts_after_sub;
          size_t i, struct_count = ctx->new_count;
          size_t struct_alignment = ctx->struct_alignment;
          ctx->new_count = 1;
          ++ts;
          if (*ts != '{') {
            PyErr_SetString(PyExc_ValueError,
                            "Buffer acquisition: Expected '{' after 'T'");
            return NULL;
          }
          if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
          ctx->enc_type = 0;
          ctx->enc_count = 0;
          ctx->struct_alignment = 0;
          ++ts;
          ts_after_sub = ts;
          /* re-parse the substruct body once per repeat */
          for (i = 0; i != struct_count; ++i) {
            ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
            if (!ts_after_sub) return NULL;
          }
          ts = ts_after_sub;
          if (struct_alignment) ctx->struct_alignment = struct_alignment;
        }
        break;
      case '}': /* end of substruct; pad to the struct's alignment */
        {
          size_t alignment = ctx->struct_alignment;
          ++ts;
          if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
          ctx->enc_type = 0;
          if (alignment && ctx->fmt_offset % alignment) {
            ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
          }
        }
        return ts;
      case 'x': /* pad bytes: consume new_count bytes of padding */
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        ctx->fmt_offset += ctx->new_count;
        ctx->new_count = 1;
        ctx->enc_count = 0;
        ctx->enc_type = 0;
        ctx->enc_packmode = ctx->new_packmode;
        ++ts;
        break;
      case 'Z': /* complex prefix; must be followed by f/d/g */
        got_Z = 1;
        ++ts;
        if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
          __Pyx_BufFmt_RaiseUnexpectedChar('Z');
          return NULL;
        }
        /* fall through */
      case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
      case 'l': case 'L': case 'q': case 'Q':
      case 'f': case 'd': case 'g':
      case 'O': case 'p':
        /* merge into the pending chunk when type/complexity/packing match */
        if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
            ctx->enc_packmode == ctx->new_packmode) {
          ctx->enc_count += ctx->new_count;
          ctx->new_count = 1;
          got_Z = 0;
          ++ts;
          break;
        }
        /* fall through: flush the old chunk, start a new one */
      case 's':
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        ctx->enc_count = ctx->new_count;
        ctx->enc_packmode = ctx->new_packmode;
        ctx->enc_type = *ts;
        ctx->is_complex = got_Z;
        ++ts;
        ctx->new_count = 1;
        got_Z = 0;
        break;
      case ':': /* ":name:" field annotation — skip it */
        ++ts;
        while(*ts != ':') ++ts;
        ++ts;
        break;
      case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (!buf) { PyErr_SetString(PyExc_ValueError, "buf is NULL."); goto fail; } else if (memviewslice->memview || memviewslice->data) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } static CYTHON_INLINE void __pyx_fatalerror(const char *fmt, ...) 
{ va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); Py_FatalError(msg); va_end(vargs); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview || (PyObject *) memview == Py_None) return; if (__pyx_get_slice_count(memview) < 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (first_time) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview ) { return; } else if ((PyObject *) memview == Py_None) { memslice->memview = NULL; return; } if (__pyx_get_slice_count(memview) <= 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (last_time) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { 
memslice->memview = NULL; } } static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); } static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (likely(Py_TYPE(obj) == type)) return 1; #if PY_MAJOR_VERSION == 2 else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(PyObject_TypeCheck(obj, type))) return 1; } __Pyx_RaiseArgumentTypeInvalid(name, obj, type); return 0; } static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif 
return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { Py_ssize_t q = a / b; Py_ssize_t r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_COMPILING_IN_CPYTHON #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } length = stop - start; if (unlikely(length <= 0)) return PyUnicode_FromUnicode(NULL, 0); cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, 
"Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #else PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o); if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, 
CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (PyErr_ExceptionMatches(PyExc_OverflowError)) PyErr_Clear(); else return NULL; } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) { #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS && PY_MAJOR_VERSION >= 3 if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; } case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; } case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; } case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; } case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned 
PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; } case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; } default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); long_long: llx = lla + llb; return PyLong_FromLongLong(llx); } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif static CYTHON_INLINE long __Pyx_div_long(long a, long b) { long q = a / b; long r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback, CYTHON_UNUSED int nogil) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; #ifdef WITH_THREAD PyGILState_STATE state; if (nogil) state = PyGILState_Ensure(); #endif __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } #ifdef WITH_THREAD if (nogil) PyGILState_Release(state); #endif } static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) { PyObject* fake_module; PyTypeObject* cached_type = NULL; fake_module = PyImport_AddModule((char*) "_cython_" CYTHON_ABI); if (!fake_module) return NULL; Py_INCREF(fake_module); cached_type = (PyTypeObject*) PyObject_GetAttrString(fake_module, type->tp_name); if (cached_type) { if (!PyType_Check((PyObject*)cached_type)) { PyErr_Format(PyExc_TypeError, "Shared Cython type %.200s is not a type object", type->tp_name); goto bad; } if (cached_type->tp_basicsize != type->tp_basicsize) { PyErr_Format(PyExc_TypeError, "Shared Cython type %.200s has the wrong 
size, try recompiling", type->tp_name); goto bad; } } else { if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; PyErr_Clear(); if (PyType_Ready(type) < 0) goto bad; if (PyObject_SetAttrString(fake_module, type->tp_name, (PyObject*) type) < 0) goto bad; Py_INCREF(type); cached_type = type; } done: Py_DECREF(fake_module); return cached_type; bad: Py_XDECREF(cached_type); cached_type = NULL; goto done; } static PyObject * __Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *closure) { if (unlikely(op->func_doc == NULL)) { if (op->func.m_ml->ml_doc) { #if PY_MAJOR_VERSION >= 3 op->func_doc = PyUnicode_FromString(op->func.m_ml->ml_doc); #else op->func_doc = PyString_FromString(op->func.m_ml->ml_doc); #endif if (unlikely(op->func_doc == NULL)) return NULL; } else { Py_INCREF(Py_None); return Py_None; } } Py_INCREF(op->func_doc); return op->func_doc; } static int __Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value) { PyObject *tmp = op->func_doc; if (value == NULL) { value = Py_None; } Py_INCREF(value); op->func_doc = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op) { if (unlikely(op->func_name == NULL)) { #if PY_MAJOR_VERSION >= 3 op->func_name = PyUnicode_InternFromString(op->func.m_ml->ml_name); #else op->func_name = PyString_InternFromString(op->func.m_ml->ml_name); #endif if (unlikely(op->func_name == NULL)) return NULL; } Py_INCREF(op->func_name); return op->func_name; } static int __Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value) { PyObject *tmp; #if PY_MAJOR_VERSION >= 3 if (unlikely(value == NULL || !PyUnicode_Check(value))) { #else if (unlikely(value == NULL || !PyString_Check(value))) { #endif PyErr_SetString(PyExc_TypeError, "__name__ must be set to a string object"); return -1; } tmp = op->func_name; Py_INCREF(value); op->func_name = value; Py_XDECREF(tmp); return 0; } static PyObject * 
__Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op) { Py_INCREF(op->func_qualname); return op->func_qualname; } static int __Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value) { PyObject *tmp; #if PY_MAJOR_VERSION >= 3 if (unlikely(value == NULL || !PyUnicode_Check(value))) { #else if (unlikely(value == NULL || !PyString_Check(value))) { #endif PyErr_SetString(PyExc_TypeError, "__qualname__ must be set to a string object"); return -1; } tmp = op->func_qualname; Py_INCREF(value); op->func_qualname = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_self(__pyx_CyFunctionObject *m, CYTHON_UNUSED void *closure) { PyObject *self; self = m->func_closure; if (self == NULL) self = Py_None; Py_INCREF(self); return self; } static PyObject * __Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op) { if (unlikely(op->func_dict == NULL)) { op->func_dict = PyDict_New(); if (unlikely(op->func_dict == NULL)) return NULL; } Py_INCREF(op->func_dict); return op->func_dict; } static int __Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value) { PyObject *tmp; if (unlikely(value == NULL)) { PyErr_SetString(PyExc_TypeError, "function's dictionary may not be deleted"); return -1; } if (unlikely(!PyDict_Check(value))) { PyErr_SetString(PyExc_TypeError, "setting function's dictionary to a non-dict"); return -1; } tmp = op->func_dict; Py_INCREF(value); op->func_dict = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op) { Py_INCREF(op->func_globals); return op->func_globals; } static PyObject * __Pyx_CyFunction_get_closure(CYTHON_UNUSED __pyx_CyFunctionObject *op) { Py_INCREF(Py_None); return Py_None; } static PyObject * __Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op) { PyObject* result = (op->func_code) ? 
op->func_code : Py_None; Py_INCREF(result); return result; } static int __Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) { int result = 0; PyObject *res = op->defaults_getter((PyObject *) op); if (unlikely(!res)) return -1; #if CYTHON_COMPILING_IN_CPYTHON op->defaults_tuple = PyTuple_GET_ITEM(res, 0); Py_INCREF(op->defaults_tuple); op->defaults_kwdict = PyTuple_GET_ITEM(res, 1); Py_INCREF(op->defaults_kwdict); #else op->defaults_tuple = PySequence_ITEM(res, 0); if (unlikely(!op->defaults_tuple)) result = -1; else { op->defaults_kwdict = PySequence_ITEM(res, 1); if (unlikely(!op->defaults_kwdict)) result = -1; } #endif Py_DECREF(res); return result; } static int __Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value) { PyObject* tmp; if (!value) { value = Py_None; } else if (value != Py_None && !PyTuple_Check(value)) { PyErr_SetString(PyExc_TypeError, "__defaults__ must be set to a tuple object"); return -1; } Py_INCREF(value); tmp = op->defaults_tuple; op->defaults_tuple = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op) { PyObject* result = op->defaults_tuple; if (unlikely(!result)) { if (op->defaults_getter) { if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL; result = op->defaults_tuple; } else { result = Py_None; } } Py_INCREF(result); return result; } static int __Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value) { PyObject* tmp; if (!value) { value = Py_None; } else if (value != Py_None && !PyDict_Check(value)) { PyErr_SetString(PyExc_TypeError, "__kwdefaults__ must be set to a dict object"); return -1; } Py_INCREF(value); tmp = op->defaults_kwdict; op->defaults_kwdict = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op) { PyObject* result = op->defaults_kwdict; if (unlikely(!result)) { if (op->defaults_getter) { if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL; result 
= op->defaults_kwdict; } else { result = Py_None; } } Py_INCREF(result); return result; } static int __Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value) { PyObject* tmp; if (!value || value == Py_None) { value = NULL; } else if (!PyDict_Check(value)) { PyErr_SetString(PyExc_TypeError, "__annotations__ must be set to a dict object"); return -1; } Py_XINCREF(value); tmp = op->func_annotations; op->func_annotations = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op) { PyObject* result = op->func_annotations; if (unlikely(!result)) { result = PyDict_New(); if (unlikely(!result)) return NULL; op->func_annotations = result; } Py_INCREF(result); return result; } static PyGetSetDef __pyx_CyFunction_getsets[] = { {(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, {(char *) "__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, {(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, {(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, {(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0}, {(char *) "__self__", (getter)__Pyx_CyFunction_get_self, 0, 0, 0}, {(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, {(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, {(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, {(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, {(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, {(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, {(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, {(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 
0}, {(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, {(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, {(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0}, {(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0}, {0, 0, 0, 0, 0} }; static PyMemberDef __pyx_CyFunction_members[] = { {(char *) "__module__", T_OBJECT, offsetof(__pyx_CyFunctionObject, func.m_module), PY_WRITE_RESTRICTED, 0}, {0, 0, 0, 0, 0} }; static PyObject * __Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, CYTHON_UNUSED PyObject *args) { #if PY_MAJOR_VERSION >= 3 return PyUnicode_FromString(m->func.m_ml->ml_name); #else return PyString_FromString(m->func.m_ml->ml_name); #endif } static PyMethodDef __pyx_CyFunction_methods[] = { {"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0}, {0, 0, 0, 0} }; #if PY_VERSION_HEX < 0x030500A0 #define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist) #else #define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func.m_weakreflist) #endif static PyObject *__Pyx_CyFunction_New(PyTypeObject *type, PyMethodDef *ml, int flags, PyObject* qualname, PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { __pyx_CyFunctionObject *op = PyObject_GC_New(__pyx_CyFunctionObject, type); if (op == NULL) return NULL; op->flags = flags; __Pyx_CyFunction_weakreflist(op) = NULL; op->func.m_ml = ml; op->func.m_self = (PyObject *) op; Py_XINCREF(closure); op->func_closure = closure; Py_XINCREF(module); op->func.m_module = module; op->func_dict = NULL; op->func_name = NULL; Py_INCREF(qualname); op->func_qualname = qualname; op->func_doc = NULL; op->func_classobj = NULL; op->func_globals = globals; Py_INCREF(op->func_globals); Py_XINCREF(code); op->func_code = code; op->defaults_pyobjects = 
0; op->defaults = NULL; op->defaults_tuple = NULL; op->defaults_kwdict = NULL; op->defaults_getter = NULL; op->func_annotations = NULL; PyObject_GC_Track(op); return (PyObject *) op; } static int __Pyx_CyFunction_clear(__pyx_CyFunctionObject *m) { Py_CLEAR(m->func_closure); Py_CLEAR(m->func.m_module); Py_CLEAR(m->func_dict); Py_CLEAR(m->func_name); Py_CLEAR(m->func_qualname); Py_CLEAR(m->func_doc); Py_CLEAR(m->func_globals); Py_CLEAR(m->func_code); Py_CLEAR(m->func_classobj); Py_CLEAR(m->defaults_tuple); Py_CLEAR(m->defaults_kwdict); Py_CLEAR(m->func_annotations); if (m->defaults) { PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); int i; for (i = 0; i < m->defaults_pyobjects; i++) Py_XDECREF(pydefaults[i]); PyMem_Free(m->defaults); m->defaults = NULL; } return 0; } static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m) { PyObject_GC_UnTrack(m); if (__Pyx_CyFunction_weakreflist(m) != NULL) PyObject_ClearWeakRefs((PyObject *) m); __Pyx_CyFunction_clear(m); PyObject_GC_Del(m); } static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg) { Py_VISIT(m->func_closure); Py_VISIT(m->func.m_module); Py_VISIT(m->func_dict); Py_VISIT(m->func_name); Py_VISIT(m->func_qualname); Py_VISIT(m->func_doc); Py_VISIT(m->func_globals); Py_VISIT(m->func_code); Py_VISIT(m->func_classobj); Py_VISIT(m->defaults_tuple); Py_VISIT(m->defaults_kwdict); if (m->defaults) { PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); int i; for (i = 0; i < m->defaults_pyobjects; i++) Py_VISIT(pydefaults[i]); } return 0; } static PyObject *__Pyx_CyFunction_descr_get(PyObject *func, PyObject *obj, PyObject *type) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; if (m->flags & __Pyx_CYFUNCTION_STATICMETHOD) { Py_INCREF(func); return func; } if (m->flags & __Pyx_CYFUNCTION_CLASSMETHOD) { if (type == NULL) type = (PyObject *)(Py_TYPE(obj)); return __Pyx_PyMethod_New(func, type, (PyObject *)(Py_TYPE(type))); } if (obj == 
Py_None) obj = NULL; return __Pyx_PyMethod_New(func, obj, type); } static PyObject* __Pyx_CyFunction_repr(__pyx_CyFunctionObject *op) { #if PY_MAJOR_VERSION >= 3 return PyUnicode_FromFormat("<cyfunction %U at %p>", op->func_qualname, (void *)op); #else return PyString_FromFormat("<cyfunction %s at %p>", PyString_AsString(op->func_qualname), (void *)op); #endif } #if CYTHON_COMPILING_IN_PYPY static PyObject * __Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyCFunctionObject* f = (PyCFunctionObject*)func; PyCFunction meth = f->m_ml->ml_meth; PyObject *self = f->m_self; Py_ssize_t size; switch (f->m_ml->ml_flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) { case METH_VARARGS: if (likely(kw == NULL || PyDict_Size(kw) == 0)) return (*meth)(self, arg); break; case METH_VARARGS | METH_KEYWORDS: return (*(PyCFunctionWithKeywords)meth)(self, arg, kw); case METH_NOARGS: if (likely(kw == NULL || PyDict_Size(kw) == 0)) { size = PyTuple_GET_SIZE(arg); if (likely(size == 0)) return (*meth)(self, NULL); PyErr_Format(PyExc_TypeError, "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", f->m_ml->ml_name, size); return NULL; } break; case METH_O: if (likely(kw == NULL || PyDict_Size(kw) == 0)) { size = PyTuple_GET_SIZE(arg); if (likely(size == 1)) { PyObject *result, *arg0 = PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL; result = (*meth)(self, arg0); Py_DECREF(arg0); return result; } PyErr_Format(PyExc_TypeError, "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", f->m_ml->ml_name, size); return NULL; } break; default: PyErr_SetString(PyExc_SystemError, "Bad call flags in " "__Pyx_CyFunction_Call. 
METH_OLDARGS is no " "longer supported!"); return NULL; } PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments", f->m_ml->ml_name); return NULL; } #else static PyObject * __Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) { return PyCFunction_Call(func, arg, kw); } #endif static PyTypeObject __pyx_CyFunctionType_type = { PyVarObject_HEAD_INIT(0, 0) "cython_function_or_method", sizeof(__pyx_CyFunctionObject), 0, (destructor) __Pyx_CyFunction_dealloc, 0, 0, 0, #if PY_MAJOR_VERSION < 3 0, #else 0, #endif (reprfunc) __Pyx_CyFunction_repr, 0, 0, 0, 0, __Pyx_CyFunction_Call, 0, 0, 0, 0, Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, 0, (traverseproc) __Pyx_CyFunction_traverse, (inquiry) __Pyx_CyFunction_clear, 0, #if PY_VERSION_HEX < 0x030500A0 offsetof(__pyx_CyFunctionObject, func_weakreflist), #else offsetof(PyCFunctionObject, m_weakreflist), #endif 0, 0, __pyx_CyFunction_methods, __pyx_CyFunction_members, __pyx_CyFunction_getsets, 0, 0, __Pyx_CyFunction_descr_get, 0, offsetof(__pyx_CyFunctionObject, func_dict), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, #if PY_VERSION_HEX >= 0x030400a1 0, #endif }; static int __pyx_CyFunction_init(void) { #if !CYTHON_COMPILING_IN_PYPY __pyx_CyFunctionType_type.tp_call = PyCFunction_Call; #endif __pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type); if (__pyx_CyFunctionType == NULL) { return -1; } return 0; } static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->defaults = PyMem_Malloc(size); if (!m->defaults) return PyErr_NoMemory(); memset(m->defaults, 0, size); m->defaults_pyobjects = pyobjects; return m->defaults; } static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->defaults_tuple = tuple; Py_INCREF(tuple); } static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject 
*func, PyObject *dict) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->defaults_kwdict = dict; Py_INCREF(dict); } static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->func_annotations = dict; Py_INCREF(dict); } static PyObject * __pyx_FusedFunction_New(PyTypeObject *type, PyMethodDef *ml, int flags, PyObject *qualname, PyObject *self, PyObject *module, PyObject *globals, PyObject *code) { __pyx_FusedFunctionObject *fusedfunc = (__pyx_FusedFunctionObject *) __Pyx_CyFunction_New(type, ml, flags, qualname, self, module, globals, code); if (!fusedfunc) return NULL; fusedfunc->__signatures__ = NULL; fusedfunc->type = NULL; fusedfunc->self = NULL; return (PyObject *) fusedfunc; } static void __pyx_FusedFunction_dealloc(__pyx_FusedFunctionObject *self) { __pyx_FusedFunction_clear(self); __pyx_FusedFunctionType->tp_free((PyObject *) self); } static int __pyx_FusedFunction_traverse(__pyx_FusedFunctionObject *self, visitproc visit, void *arg) { Py_VISIT(self->self); Py_VISIT(self->type); Py_VISIT(self->__signatures__); return __Pyx_CyFunction_traverse((__pyx_CyFunctionObject *) self, visit, arg); } static int __pyx_FusedFunction_clear(__pyx_FusedFunctionObject *self) { Py_CLEAR(self->self); Py_CLEAR(self->type); Py_CLEAR(self->__signatures__); return __Pyx_CyFunction_clear((__pyx_CyFunctionObject *) self); } static PyObject * __pyx_FusedFunction_descr_get(PyObject *self, PyObject *obj, PyObject *type) { __pyx_FusedFunctionObject *func, *meth; func = (__pyx_FusedFunctionObject *) self; if (func->self || func->func.flags & __Pyx_CYFUNCTION_STATICMETHOD) { Py_INCREF(self); return self; } if (obj == Py_None) obj = NULL; meth = (__pyx_FusedFunctionObject *) __pyx_FusedFunction_NewEx( ((PyCFunctionObject *) func)->m_ml, ((__pyx_CyFunctionObject *) func)->flags, ((__pyx_CyFunctionObject *) func)->func_qualname, ((__pyx_CyFunctionObject *) func)->func_closure, 
((PyCFunctionObject *) func)->m_module, ((__pyx_CyFunctionObject *) func)->func_globals, ((__pyx_CyFunctionObject *) func)->func_code); if (!meth) return NULL; Py_XINCREF(func->func.func_classobj); meth->func.func_classobj = func->func.func_classobj; Py_XINCREF(func->__signatures__); meth->__signatures__ = func->__signatures__; Py_XINCREF(type); meth->type = type; Py_XINCREF(func->func.defaults_tuple); meth->func.defaults_tuple = func->func.defaults_tuple; if (func->func.flags & __Pyx_CYFUNCTION_CLASSMETHOD) obj = type; Py_XINCREF(obj); meth->self = obj; return (PyObject *) meth; } static PyObject * _obj_to_str(PyObject *obj) { if (PyType_Check(obj)) return PyObject_GetAttr(obj, __pyx_n_s_name_2); else return PyObject_Str(obj); } static PyObject * __pyx_FusedFunction_getitem(__pyx_FusedFunctionObject *self, PyObject *idx) { PyObject *signature = NULL; PyObject *unbound_result_func; PyObject *result_func = NULL; if (self->__signatures__ == NULL) { PyErr_SetString(PyExc_TypeError, "Function is not fused"); return NULL; } if (PyTuple_Check(idx)) { PyObject *list = PyList_New(0); Py_ssize_t n = PyTuple_GET_SIZE(idx); PyObject *string = NULL; PyObject *sep = NULL; int i; if (!list) return NULL; for (i = 0; i < n; i++) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *item = PyTuple_GET_ITEM(idx, i); #else PyObject *item = PySequence_ITEM(idx, i); #endif string = _obj_to_str(item); #if !CYTHON_COMPILING_IN_CPYTHON Py_DECREF(item); #endif if (!string || PyList_Append(list, string) < 0) goto __pyx_err; Py_DECREF(string); } sep = PyUnicode_FromString("|"); if (sep) signature = PyUnicode_Join(sep, list); __pyx_err: ; Py_DECREF(list); Py_XDECREF(sep); } else { signature = _obj_to_str(idx); } if (!signature) return NULL; unbound_result_func = PyObject_GetItem(self->__signatures__, signature); if (unbound_result_func) { if (self->self || self->type) { __pyx_FusedFunctionObject *unbound = (__pyx_FusedFunctionObject *) unbound_result_func; Py_CLEAR(unbound->func.func_classobj); 
Py_XINCREF(self->func.func_classobj); unbound->func.func_classobj = self->func.func_classobj; result_func = __pyx_FusedFunction_descr_get(unbound_result_func, self->self, self->type); } else { result_func = unbound_result_func; Py_INCREF(result_func); } } Py_DECREF(signature); Py_XDECREF(unbound_result_func); return result_func; } static PyObject * __pyx_FusedFunction_callfunction(PyObject *func, PyObject *args, PyObject *kw) { __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func; PyObject *result; int static_specialized = (cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD && !((__pyx_FusedFunctionObject *) func)->__signatures__); if (cyfunc->flags & __Pyx_CYFUNCTION_CCLASS && !static_specialized) { Py_ssize_t argc; PyObject *new_args; PyObject *self; PyObject *m_self; argc = PyTuple_GET_SIZE(args); new_args = PyTuple_GetSlice(args, 1, argc); if (!new_args) return NULL; self = PyTuple_GetItem(args, 0); if (!self) return NULL; m_self = cyfunc->func.m_self; cyfunc->func.m_self = self; result = __Pyx_CyFunction_Call(func, new_args, kw); cyfunc->func.m_self = m_self; Py_DECREF(new_args); } else { result = __Pyx_CyFunction_Call(func, args, kw); } return result; } static PyObject * __pyx_FusedFunction_call(PyObject *func, PyObject *args, PyObject *kw) { __pyx_FusedFunctionObject *binding_func = (__pyx_FusedFunctionObject *) func; Py_ssize_t argc = PyTuple_GET_SIZE(args); PyObject *new_args = NULL; __pyx_FusedFunctionObject *new_func = NULL; PyObject *result = NULL; PyObject *self = NULL; int is_staticmethod = binding_func->func.flags & __Pyx_CYFUNCTION_STATICMETHOD; int is_classmethod = binding_func->func.flags & __Pyx_CYFUNCTION_CLASSMETHOD; if (binding_func->self) { Py_ssize_t i; new_args = PyTuple_New(argc + 1); if (!new_args) return NULL; self = binding_func->self; #if !CYTHON_COMPILING_IN_CPYTHON Py_INCREF(self); #endif Py_INCREF(self); PyTuple_SET_ITEM(new_args, 0, self); for (i = 0; i < argc; i++) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *item = 
PyTuple_GET_ITEM(args, i); Py_INCREF(item); #else PyObject *item = PySequence_ITEM(args, i); if (unlikely(!item)) goto bad; #endif PyTuple_SET_ITEM(new_args, i + 1, item); } args = new_args; } else if (binding_func->type) { if (argc < 1) { PyErr_SetString(PyExc_TypeError, "Need at least one argument, 0 given."); return NULL; } #if CYTHON_COMPILING_IN_CPYTHON self = PyTuple_GET_ITEM(args, 0); #else self = PySequence_ITEM(args, 0); if (unlikely(!self)) return NULL; #endif } if (self && !is_classmethod && !is_staticmethod) { int is_instance = PyObject_IsInstance(self, binding_func->type); if (unlikely(!is_instance)) { PyErr_Format(PyExc_TypeError, "First argument should be of type %.200s, got %.200s.", ((PyTypeObject *) binding_func->type)->tp_name, self->ob_type->tp_name); goto bad; } else if (unlikely(is_instance == -1)) { goto bad; } } #if !CYTHON_COMPILING_IN_CPYTHON Py_XDECREF(self); self = NULL; #endif if (binding_func->__signatures__) { PyObject *tup = PyTuple_Pack(4, binding_func->__signatures__, args, kw == NULL ? 
Py_None : kw, binding_func->func.defaults_tuple); if (!tup) goto bad; new_func = (__pyx_FusedFunctionObject *) __pyx_FusedFunction_callfunction(func, tup, NULL); Py_DECREF(tup); if (!new_func) goto bad; Py_XINCREF(binding_func->func.func_classobj); Py_CLEAR(new_func->func.func_classobj); new_func->func.func_classobj = binding_func->func.func_classobj; func = (PyObject *) new_func; } result = __pyx_FusedFunction_callfunction(func, args, kw); bad: #if !CYTHON_COMPILING_IN_CPYTHON Py_XDECREF(self); #endif Py_XDECREF(new_args); Py_XDECREF((PyObject *) new_func); return result; } static PyMemberDef __pyx_FusedFunction_members[] = { {(char *) "__signatures__", T_OBJECT, offsetof(__pyx_FusedFunctionObject, __signatures__), READONLY, 0}, {0, 0, 0, 0, 0}, }; static PyMappingMethods __pyx_FusedFunction_mapping_methods = { 0, (binaryfunc) __pyx_FusedFunction_getitem, 0, }; static PyTypeObject __pyx_FusedFunctionType_type = { PyVarObject_HEAD_INIT(0, 0) "fused_cython_function", sizeof(__pyx_FusedFunctionObject), 0, (destructor) __pyx_FusedFunction_dealloc, 0, 0, 0, #if PY_MAJOR_VERSION < 3 0, #else 0, #endif 0, 0, 0, &__pyx_FusedFunction_mapping_methods, 0, (ternaryfunc) __pyx_FusedFunction_call, 0, 0, 0, 0, Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, 0, (traverseproc) __pyx_FusedFunction_traverse, (inquiry) __pyx_FusedFunction_clear, 0, 0, 0, 0, 0, __pyx_FusedFunction_members, __pyx_CyFunction_getsets, &__pyx_CyFunctionType_type, 0, __pyx_FusedFunction_descr_get, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, #if PY_VERSION_HEX >= 0x030400a1 0, #endif }; static int __pyx_FusedFunction_init(void) { __pyx_FusedFunctionType = __Pyx_FetchCommonType(&__pyx_FusedFunctionType_type); if (__pyx_FusedFunctionType == NULL) { return -1; } return 0; } static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < 
end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for 
(i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
c_line : py_line, py_code); } py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (buf->strides[dim] != sizeof(void *)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (buf->strides[dim] != buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (stride < buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); 
goto fail; } } } else { if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (spec & (__Pyx_MEMVIEW_PTR)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (buf->suboffsets) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (buf->suboffsets && buf->suboffsets[dim] >= 0) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (!buf->suboffsets || (buf->suboffsets && buf->suboffsets[dim] < 0)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; 
__Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (buf->ndim != ndim) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned) buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (!__pyx_check_strides(buf, i, ndim, spec)) goto fail; if (!__pyx_check_suboffsets(buf, i, ndim, spec)) goto fail; } if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_float(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS, 4, &__Pyx_TypeInfo_float, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_double(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS, 4, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; 
/* Tail of __Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_double (definition
 * begins in the preceding range): shared failure exit nulls the slice. */
__pyx_fail:
    result.memview = NULL;
    result.data = NULL;
    return result;
}

/*
 * Coerce `obj` into a 4-D direct/strided memoryview slice of unsigned char.
 * Py_None is passed through (memview set to Py_None); on any validation
 * failure the returned slice has NULL memview/data.
 */
static CYTHON_INLINE __Pyx_memviewslice
__Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_unsigned_char(PyObject *obj)
{
    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem stack[1];
    int axes_specs[] = {
        (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED),
        (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED),
        (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED),
        (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED)
    };
    int retcode;
    if (obj == Py_None) {
        result.memview = (struct __pyx_memoryview_obj *) Py_None;
        return result;
    }
    retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
                                                 PyBUF_RECORDS, 4,
                                                 &__Pyx_TypeInfo_unsigned_char,
                                                 stack,
                                                 &result, obj);
    if (unlikely(retcode == -1))
        goto __pyx_fail;
    return result;
__pyx_fail:
    result.memview = NULL;
    result.data = NULL;
    return result;
}

/* Python 2 lacks generic PyObject_GetBuffer dispatch for Cython's own
 * array/memoryview types, so route those explicitly. */
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags)
{
    if (PyObject_CheckBuffer(obj))
        return PyObject_GetBuffer(obj, view, flags);
    if (PyObject_TypeCheck(obj, __pyx_array_type))
        return __pyx_array_getbuffer(obj, view, flags);
    if (PyObject_TypeCheck(obj, __pyx_memoryview_type))
        return __pyx_memoryview_getbuffer(obj, view, flags);
    PyErr_Format(PyExc_TypeError,
                 "'%.200s' does not have the buffer interface",
                 Py_TYPE(obj)->tp_name);
    return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view)
{
    PyObject *obj = view->obj;
    if (!obj)
        return;
    if (PyObject_CheckBuffer(obj)) {
        PyBuffer_Release(view);
        return;
    }
    /* Non-buffer exporter: just drop the owning reference. */
    Py_DECREF(obj);
    view->obj = NULL;
}
#endif

/*
 * Coerce `obj` into a 1-D direct/strided memoryview slice of long.
 * Same Py_None/failure conventions as the converters above; body continues
 * past this range.
 */
static CYTHON_INLINE __Pyx_memviewslice
__Pyx_PyObject_to_MemoryviewSlice_ds_long(PyObject *obj)
{
    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem stack[1];
    int axes_specs[] = {
        (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED)
    };
    int retcode;
    if (obj == Py_None) {
        result.memview = (struct __pyx_memoryview_obj *) Py_None;
        return result;
    }
    retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
                                                 PyBUF_RECORDS, 1,
                                                 &__Pyx_TypeInfo_long,
                                                 stack,
                                                 &result, obj);
    if (unlikely(retcode == -1))
        goto __pyx_fail;
    return result;
__pyx_fail:
    result.memview = NULL;
    result.data = NULL;
    return result;
}

/*
 * Convert a C long to a Python integer object.
 * Generated from a template shared across integer types, hence the
 * constant-foldable sizeof comparisons (e.g. sizeof(long) < sizeof(long)
 * is always false here); the compiler removes the dead branches.
 * Falls back to _PyLong_FromByteArray when no fitting fast path exists.
 */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value)
{
    const long neg_one = (long) -1, const_zero = (long) 0;
    /* Compile-time signedness probe for the (templated) source type. */
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(long) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
        } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
        }
    } else {
        if (sizeof(long) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Last resort: hand the raw bytes to CPython, honoring host
         * endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(long),
                                     little, !is_unsigned);
    }
}

/*
 * Linear scan: return 1 if `character` occurs in the bytes object, else 0.
 * `bytes` must be a real bytes object (unchecked macros are used).
 */
static CYTHON_INLINE int __Pyx_BytesContains(PyObject* bytes, char character)
{
    const Py_ssize_t length = PyBytes_GET_SIZE(bytes);
    char* char_start = PyBytes_AS_STRING(bytes);
    char* pos;
    for (pos=char_start; pos < char_start+length; pos++) {
        if (character == pos[0])
            return 1;
    }
    return 0;
}

/*
 * Return 1 if the slice is contiguous in the given order ('F' checks
 * dimensions first-to-last, anything else last-to-first, i.e. C order).
 * Any usable suboffset (>= 0) or stride mismatch fails the check.
 */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice *mvs,
                                        char order, int ndim)
{
    int i, index, step, start;
    Py_ssize_t itemsize = mvs->memview->view.itemsize;
    if (order == 'F') {
        step = 1;
        start = 0;
    } else {
        step = -1;
        start = ndim - 1;
    }
    for (i = 0; i < ndim; i++) {
        index = start + step * i;
        if (mvs->suboffsets[index] >= 0 ||
            mvs->strides[index] != itemsize)
            return 0;
        /* Expected stride for the next (outer) dimension. */
        itemsize *= mvs->shape[index];
    }
    return 1;
}

/*
 * Compute the half-open byte range [*out_start, *out_end) spanned by the
 * slice's data, accounting for negative strides. A zero-extent dimension
 * yields an empty range (start == end).
 */
static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
                                           void **out_start, void **out_end,
                                           int ndim, size_t itemsize)
{
    char *start, *end;
    int i;
    start = end = slice->data;
    for (i = 0; i < ndim; i++) {
        Py_ssize_t stride = slice->strides[i];
        Py_ssize_t extent = slice->shape[i];
        if (extent == 0) {
            *out_start = *out_end = start;
            return;
        } else {
            if (stride > 0)
                end += stride * (extent - 1);
            else
                start += stride * (extent - 1);
        }
    }
    *out_start = start;
    /* `end` currently points at the last element; include it. */
    *out_end = end + itemsize;
}

/* Return nonzero when the byte extents of the two slices intersect. */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
                                __Pyx_memviewslice *slice2,
                                int ndim, size_t itemsize)
{
    void *start1, *end1, *start2, *end2;
    __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
    __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
    return (start1 < end2) && (start2 < end1);
}

/*
 * Allocate a new contiguous array (mode "c" or "fortran") and copy
 * `from_mvs` into it, returning a fresh slice. Indirect (suboffset)
 * dimensions are rejected. Body continues past this range.
 */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
                                 const char *mode, int ndim,
                                 size_t sizeof_dtype, int contig_flag,
                                 int dtype_is_object)
{
    __Pyx_RefNannyDeclarations
    int i;
    __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
    struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
    Py_buffer *buf = &from_memview->view;
    PyObject *shape_tuple = NULL;
    PyObject *temp_int = NULL;
    struct __pyx_array_obj *array_obj = NULL;
    struct __pyx_memoryview_obj *memview_obj = NULL;
    __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
    for (i = 0; i < ndim; i++) {
        if (from_mvs->suboffsets[i] >= 0) {
            PyErr_Format(PyExc_ValueError,
                         "Cannot copy memoryview slice with "
                         "indirect dimensions (axis %d)", i);
            goto fail;
        }
    }
    /* Build the Python shape tuple for the new array. */
    shape_tuple = PyTuple_New(ndim);
    if (unlikely(!shape_tuple)) {
        goto fail;
    }
    __Pyx_GOTREF(shape_tuple);
    for(i = 0; i < ndim; i++) {
        temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
        if(unlikely(!temp_int)) {
            goto fail;
        } else {
            /* Reference stolen by the tuple. */
            PyTuple_SET_ITEM(shape_tuple, i, temp_int);
            temp_int = NULL;
        }
    }
    array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format,
                                (char *) mode, NULL);
    if (unlikely(!array_obj)) {
        goto fail;
    }
    __Pyx_GOTREF(array_obj);
    memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
                                (PyObject *) array_obj, contig_flag,
                                dtype_is_object,
from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } static CYTHON_INLINE PyObject * __pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS 
const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, 
PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, -(sdigit) digits[0]) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * 
sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); 
Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) -1, const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned 
long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, -(sdigit) digits[0]) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * 
sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) 
(((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } static CYTHON_INLINE long 
/* NOTE(review): Cython-generated runtime boilerplate -- do not hand-edit logic,
 * regenerate from the .pyx source instead.  Comments below added for review only.
 * (Continuation: the 'static CYTHON_INLINE long' specifier ends the previous line.)
 * Converts an arbitrary Python object to a C long; raises OverflowError on
 * out-of-range values and TypeError on non-integers. */
__Pyx_PyInt_As_long(PyObject *x) {
    const long neg_one = (long) -1, const_zero = (long) 0;
    const int is_unsigned = neg_one > const_zero; /* always 0 for signed long */
#if PY_MAJOR_VERSION < 3
    /* Fast path: Python 2 PyInt is a boxed C long. */
    if (likely(PyInt_Check(x))) {
        if (sizeof(long) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (long) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
            /* Small PyLongs: assemble the value directly from the digit array. */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case 0: return (long) 0;
                case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
                case 2:
                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
                            return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
                            return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
                            return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
            }
#endif
#if CYTHON_COMPILING_IN_CPYTHON
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                /* Portable negativity check for non-CPython implementations. */
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (long) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(long) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
            } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
            }
        } else {
#if CYTHON_USE_PYLONG_INTERNALS
            /* Signed variant: negative Py_SIZE means negative value. */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case 0: return (long) 0;
                case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, -(sdigit) digits[0])
                case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
                case -2:
                    if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                            return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                            return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                            return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(long) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
            } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
            }
        }
        {
            /* Fallback for very large values: byte-array round trip. */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            long val;
            PyObject *v = __Pyx_PyNumber_Int(x);
#if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
#endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one; /* runtime endianness probe */
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (long) -1;
        }
    } else {
        /* Not an int/long: coerce via __int__/__long__, then recurse once. */
        long val;
        PyObject *tmp = __Pyx_PyNumber_Int(x);
        if (!tmp) return (long) -1;
        val = __Pyx_PyInt_As_long(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
                    "value too large to convert to long");
    return (long) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
                    "can't convert negative value to long");
    return (long) -1;
}

/* Warn (via PyErr_WarnEx) when the compile-time Python version does not match
 * the runtime version; compares only major.minor digits. */
static int __Pyx_check_binary_version(void) {
    char ctversion[4], rtversion[4];
    PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
    PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
    if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
        char message[200];
        PyOS_snprintf(message, sizeof(message),
                      "compiletime version %s of module '%.100s' "
                      "does not match runtime version %s",
                      ctversion, __Pyx_MODULE_NAME, rtversion);
        return PyErr_WarnEx(NULL, message, 1);
    }
    return 0;
}

/* Intern/create all string constants listed in the module string table;
 * returns -1 on the first allocation failure. */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
    while (t->p) {
#if PY_MAJOR_VERSION < 3
        if (t->is_unicode) {
            *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
        } else if (t->intern) {
            *t->p = PyString_InternFromString(t->s);
        } else {
            *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
        }
#else
        if (t->is_unicode | t->is_str) {
            if (t->intern) {
                *t->p = PyUnicode_InternFromString(t->s);
            } else if (t->encoding) {
                *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
            } else {
                *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
            }
        } else {
            *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
        }
#endif
        if (!*t->p)
            return -1;
        ++t;
    }
    return 0;
}
static
/* NOTE(review): Cython-generated string helpers; comments added for review only.
 * (Continuation: 'static CYTHON_INLINE PyObject*' ends the previous line.) */
CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
    return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
/* As below, but the caller does not need the length. */
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) {
    Py_ssize_t ignore;
    return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
/* Borrow a char* view of a bytes/bytearray/str object and report its length.
 * Returns NULL with an exception set on failure (e.g. non-ASCII data when the
 * default encoding is ASCII). */
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
    if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
            __Pyx_sys_getdefaultencoding_not_ascii &&
#endif
            PyUnicode_Check(o)) {
#if PY_VERSION_HEX < 0x03030000
        char* defenc_c;
        /* Pre-3.3: go through the default-encoded bytes cache. */
        PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
        if (!defenc) return NULL;
        defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
        {
            /* Reject any byte >= 128; raise via PyUnicode_AsASCIIString. */
            char* end = defenc_c + PyBytes_GET_SIZE(defenc);
            char* c;
            for (c = defenc_c; c < end; c++) {
                if ((unsigned char) (*c) >= 128) {
                    PyUnicode_AsASCIIString(o);
                    return NULL;
                }
            }
        }
#endif
        *length = PyBytes_GET_SIZE(defenc);
        return defenc_c;
#else
        if (__Pyx_PyUnicode_READY(o) == -1) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
        if (PyUnicode_IS_ASCII(o)) {
            *length = PyUnicode_GET_LENGTH(o);
            return PyUnicode_AsUTF8(o);
        } else {
            PyUnicode_AsASCIIString(o);
            return NULL;
        }
#else
        return PyUnicode_AsUTF8AndSize(o, length);
#endif
#endif
    } else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
    if (PyByteArray_Check(o)) {
        *length = PyByteArray_GET_SIZE(o);
        return PyByteArray_AS_STRING(o);
    } else
#endif
    {
        char* result;
        int r = PyBytes_AsStringAndSize(o, &result, length);
        if (unlikely(r < 0)) {
            return NULL;
        } else {
            return result;
        }
    }
}
/* Truth test that avoids a Python call for the True/False/None singletons. */
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
    int is_true = x == Py_True;
    if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
    else return PyObject_IsTrue(x);
}
static CYTHON_INLINE PyObject*
__Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return __Pyx_NewRef(x); m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_MAJOR_VERSION < 3 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(x); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
/* ===== begin file: mlp_mnist_bf16_avx512_numa.c (file-seam marker; was a bare filename that would not compile) ===== */
/******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved.                      *
* This file is part of the LIBXSMM library.                                   *
*                                                                             *
* For information on the license, see the LICENSE file.                       *
* Further information: https://github.com/hfp/libxsmm/                        *
* SPDX-License-Identifier: BSD-3-Clause                                       *
******************************************************************************/
/* Evangelos Georganas, Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <libxsmm_sync.h>

#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#if defined(_OPENMP)
# include <omp.h>
#endif

/* include c-based dnn library */
#include "../common/dnn_common.h"
#include "../common/mnist.h"

#define TEST_ACCURACY
#define OVERWRITE_DOUTPUT_BWDUPD

/* Load 16 bf16 values and widen to fp32 (shift into the high half of each lane). */
#define _mm512_load_fil(A)   _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepi16_epi32(_mm256_loadu_si256((__m256i*)(A))),16))
/* Round 16 fp32 values to bf16 and store. */
#define _mm512_store_fil(A,B) _mm256_storeu_si256((__m256i*)(A), (__m256i)_mm512_cvtneps_pbh((B)))

/* Threads per NUMA domain; set before the NUMA-aware initializers are used.
 * NOTE(review): presumably assigned in main() (not visible here) -- the
 * initializers below divide by it, so it must be non-zero at call time. */
static int threads_per_numa = 0;

/* Fill a float buffer with ones, positive random values, or small centered
 * random values, depending on initOne/initPos. */
LIBXSMM_INLINE void my_init_buf(float* buf, size_t size, int initPos, int initOne) {
  size_t i;
  zero_buf(buf, size);
  /* FIX: iterate with size_t instead of the previous '(int)size' cast, which
   * truncated (and could loop forever) for buffers with > INT_MAX elements. */
  for (i = 0; i < size; ++i) {
    buf[i] = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64()/10.0)));
  }
}

/* Same as my_init_buf, but rounds each value to bf16 by taking the upper 16
 * bits of the fp32 representation. */
LIBXSMM_INLINE void my_init_buf_bf16(libxsmm_bfloat16* buf, size_t size, int initPos, int initOne) {
  size_t i;
  zero_buf_bf16(buf, size);
  /* FIX: size_t loop index (see my_init_buf). */
  for (i = 0; i < size; ++i) {
    libxsmm_bfloat16_hp tmp;
    tmp.f = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64()/10.0)));
    buf[i] = tmp.i[1]; /* truncate fp32 -> bf16 (high half) */
  }
}

/* First-touch a bf16 buffer from the calling thread so pages land on its NUMA
 * node.  ft_mode 0: 4 KiB chunks assigned block-cyclically across nodes;
 * ft_mode 1: one contiguous block per node.  initPos/initOne are currently
 * unused (the underlying init always gets 0,0) -- kept for interface parity. */
LIBXSMM_INLINE void init_buf_bf16_numa_aware(int threads, int ltid, int ft_mode, libxsmm_bfloat16* buf, size_t size, int initPos, int initOne) {
  size_t chunksize, chunks, l;
  int my_numa_node = ltid/threads_per_numa;
  int n_numa_nodes = threads/threads_per_numa;
  if (ft_mode == 0) {
    /* Mode 0 : Block cyclic assignment to NUMA nodes.
     * FIX: the chunk arithmetic previously used 'int' ('int bufsize = size * 2'),
     * which overflows for buffers of >= 1 Gi elements; use size_t throughout. */
    size_t bufsize = size * 2; /* bytes (2 bytes per bf16 element) */
    chunksize = 4096;
    chunks = (bufsize + chunksize - 1)/chunksize;
    for (l = 0; l < chunks; l++) {
      size_t _chunksize = (l < chunks - 1) ? chunksize : bufsize - (chunks-1) * chunksize;
      if ((int)(l % (size_t)n_numa_nodes) == my_numa_node) {
        my_init_buf_bf16((libxsmm_bfloat16*) buf+l*(chunksize/2), _chunksize/2, 0, 0 );
      }
    }
  } else {
    /* Mode 1: Block assignment to NUMA nodes (one contiguous range per node). */
    chunks = (size_t)n_numa_nodes;
    chunksize = (size + chunks - 1) /chunks;
    for (l = 0; l < chunks; l++) {
      size_t _chunksize = (l < chunks - 1) ? chunksize : size - (chunks-1) * chunksize;
      if ((int)l == my_numa_node) {
        my_init_buf_bf16((libxsmm_bfloat16*) buf+l*chunksize, _chunksize, 0, 0 );
      }
    }
  }
}

/* First-touch 'buf' with one initializer thread per NUMA node, block layout.
 * FIX: omp_get_max_threads() was called unconditionally although <omp.h> is
 * only included under _OPENMP; guard it so non-OpenMP builds compile. */
void init_buffer_block_numa(libxsmm_bfloat16* buf, size_t size) {
#if defined(_OPENMP)
  const int nThreads = omp_get_max_threads();
#else
  const int nThreads = 1;
#endif
#if defined(_OPENMP)
# pragma omp parallel
#endif
  {
#if defined(_OPENMP)
    const int tid = omp_get_thread_num();
#else
    const int tid = 0;
#endif
    /* Only the first thread of each NUMA domain touches pages. */
    if (tid % threads_per_numa == 0) {
      init_buf_bf16_numa_aware(nThreads, tid, 1, buf, size, 0, 0);
    }
  }
}

/* Same as init_buffer_block_numa, but with block-cyclic page placement. */
void init_buffer_block_cyclic_numa(libxsmm_bfloat16* buf, size_t size) {
#if defined(_OPENMP)
  const int nThreads = omp_get_max_threads();
#else
  const int nThreads = 1;
#endif
#if defined(_OPENMP)
# pragma omp parallel
#endif
  {
#if defined(_OPENMP)
    const int tid = omp_get_thread_num();
#else
    const int tid = 0;
#endif
    if (tid % threads_per_numa == 0) {
      init_buf_bf16_numa_aware(nThreads, tid, 0, buf, size, 0, 0);
    }
  }
}

#if 0
LIBXSMM_INLINE void my_matrix_copy_KCCK_to_KCCK_vnni(float *src, float *dst, int C, int K, int bc, int bk) {
  int k1, k2, c1, c2;
  int kBlocks = K/bk;
  int cBlocks = C/bc;
  LIBXSMM_VLA_DECL(4, float, real_src,
src, cBlocks, bc, bk);
  LIBXSMM_VLA_DECL(5, float, real_dst, dst, cBlocks, bc/2, bk, 2);
  /* Repack KCCK weights into VNNI layout: pairs of c-rows are interleaved. */
  for (k1 = 0; k1 < kBlocks; k1++) {
    for (c1 = 0; c1 < cBlocks; c1++) {
      for (c2 = 0; c2 < bc; c2++) {
        for (k2 = 0; k2 < bk; k2++) {
          LIBXSMM_VLA_ACCESS(5, real_dst, k1, c1, c2/2, k2, c2%2, cBlocks, bc/2, bk, 2) =
            LIBXSMM_VLA_ACCESS(4, real_src, k1, c1, c2, k2, cBlocks, bc, bk);
        }
      }
    }
  }
}
#endif

/* Elementwise ops optionally fused into the FC layers (bit flags). */
typedef enum my_eltwise_fuse {
  MY_ELTWISE_FUSE_NONE = 0,
  MY_ELTWISE_FUSE_BIAS = 1,
  MY_ELTWISE_FUSE_RELU = 2,
  MY_ELTWISE_FUSE_BIAS_RELU = MY_ELTWISE_FUSE_BIAS | MY_ELTWISE_FUSE_RELU
} my_eltwise_fuse;

/* Training pass selector (BWD = BWD_D | BWD_W). */
typedef enum my_pass {
  MY_PASS_FWD   = 1,
  MY_PASS_BWD_D = 2,
  MY_PASS_BWD_W = 4,
  MY_PASS_BWD   = 6
} my_pass;

/* SGD optimizer configuration: weight shape (C x K in bc x bk blocks),
 * thread count, learning rate, and a shared barrier. */
typedef struct my_opt_config {
  libxsmm_blasint C;
  libxsmm_blasint K;
  libxsmm_blasint bc;
  libxsmm_blasint bk;
  libxsmm_blasint threads;
  float lr;
  size_t scratch_size;
  libxsmm_barrier* barrier;
} my_opt_config;

/* Softmax forward configuration (N x C activations in bn x bc blocks). */
typedef struct my_smax_fwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint threads;
  size_t scratch_size;
  libxsmm_barrier* barrier;
} my_smax_fwd_config;

/* Softmax backward configuration; loss_weight scales the gradient. */
typedef struct my_smax_bwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint threads;
  size_t scratch_size;
  float loss_weight;
  libxsmm_barrier* barrier;
} my_smax_bwd_config;

/* Fully-connected forward configuration: problem/blocking sizes, the 2D
 * thread-decomposition parameters, and the JIT-ed BRGEMM/eltwise kernels. */
typedef struct my_fc_fwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint K;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint bk;
  libxsmm_blasint threads;
  my_eltwise_fuse fuse_type;
  libxsmm_blasint fwd_bf;             /* blocking factor over C (reduction) */
  libxsmm_blasint fwd_2d_blocking;    /* 1 = use 2D thread teams */
  libxsmm_blasint fwd_col_teams;
  libxsmm_blasint fwd_row_teams;
  libxsmm_blasint fwd_M_hyperpartitions;
  libxsmm_blasint fwd_N_hyperpartitions;
  size_t scratch_size;
  libxsmm_barrier* barrier;
  /* BRGEMMs: fp32-accumulating (gemm_fwd) and bf16-out variants. */
  libxsmm_bsmmfunction_reducebatch_strd gemm_fwd;
  libxsmm_bmmfunction_reducebatch_strd gemm_fwd2;
  libxsmm_bmmfunction_reducebatch_strd gemm_fwd3;
  libxsmm_meltwfunction_unary fwd_cvtfp32bf16_kernel;
  libxsmm_meltwfunction_unary
fwd_cvtfp32bf16_relu_kernel;
  libxsmm_meltwfunction_unary fwd_sigmoid_cvtfp32bf16_kernel;
  libxsmm_meltwfunction_unary fwd_zero_kernel;
  libxsmm_meltwfunction_unary fwd_relu_kernel;
  libxsmm_meltwfunction_unary fwd_copy_bf16fp32_kernel;
  libxsmm_meltwfunction_unary fwd_colbcast_bf16fp32_copy_kernel;
  libxsmm_meltwfunction_unary fwd_colbcast_bf16bf16_copy_kernel;
} my_fc_fwd_config;

/* Fully-connected backward/update configuration: blocking and 2D-team
 * parameters for both the BWD-data and UPD (weight-gradient) passes, plus all
 * JIT-ed BRGEMM, converter, and transform kernels. */
typedef struct my_fc_bwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint K;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint bk;
  libxsmm_blasint threads;
  my_eltwise_fuse fuse_type;
  libxsmm_blasint bwd_bf;
  libxsmm_blasint bwd_2d_blocking;
  libxsmm_blasint bwd_col_teams;
  libxsmm_blasint bwd_row_teams;
  libxsmm_blasint bwd_M_hyperpartitions;
  libxsmm_blasint bwd_N_hyperpartitions;
  libxsmm_blasint upd_bf;
  libxsmm_blasint upd_2d_blocking;
  libxsmm_blasint upd_col_teams;
  libxsmm_blasint upd_row_teams;
  libxsmm_blasint upd_M_hyperpartitions;
  libxsmm_blasint upd_N_hyperpartitions;
  libxsmm_blasint ifm_subtasks;
  libxsmm_blasint ofm_subtasks;
  size_t scratch_size;
  size_t doutput_scratch_mark; /* offset of the doutput copy inside scratch */
  libxsmm_barrier* barrier;
  libxsmm_bsmmfunction_reducebatch_strd gemm_bwd;
  libxsmm_bsmmfunction_reducebatch_strd gemm_bwd2;
  libxsmm_bmmfunction_reducebatch_strd gemm_bwd3;
  libxsmm_bsmmfunction_reducebatch_strd gemm_upd;
  libxsmm_bmmfunction_reducebatch_strd gemm_upd3;
  libxsmm_meltwfunction_unary bwd_cvtfp32bf16_kernel;
  libxsmm_meltwfunction_unary upd_cvtfp32bf16_kernel;
  libxsmm_meltwfunction_unary bwd_relu_kernel;
  libxsmm_meltwfunction_unary bwd_zero_kernel;
  libxsmm_meltwfunction_unary upd_zero_kernel;
  libxsmm_meltwfunction_unary delbias_reduce_kernel;
  libxsmm_meltwfunction_unary vnni_to_vnniT_kernel;
  libxsmm_meltwfunction_unary norm_to_normT_kernel;
  libxsmm_meltwfunction_unary norm_to_vnni_kernel;
  libxsmm_meltwfunction_unary upd_norm_to_vnni_kernel;
  libxsmm_meltwfunction_unary norm_to_vnni_kernel_wt;
} my_fc_bwd_config;

/* Build a forward FC configuration: choose the thread decomposition from the
 * thread count, create the barrier, and JIT all BRGEMM/eltwise TPP kernels.
 * Exits the process if any JIT dispatch fails. */
my_fc_fwd_config setup_my_fc_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type) {
  my_fc_fwd_config res;
  libxsmm_blasint lda = bk;
  libxsmm_blasint ldb = bc;
  libxsmm_blasint ldc = bk;
  libxsmm_blasint ld_zero = bk*bn;
  libxsmm_blasint ld_upconvert = K;
  float alpha = 1.0f;
  float beta = 1.0f;      /* accumulate into C */
  float zerobeta = 0.0f;  /* overwrite C */
  libxsmm_meltw_flags fusion_flags; /* NOTE(review): declared but unused here */
  int l_flags, l_tc_flags;
  int l_tr_flags = LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') );
  libxsmm_blasint unroll_hint;
  /* setting up some handle values */
  res.N = N;
  res.C = C;
  res.K = K;
  res.bn = bn;
  res.bc = bc;
  res.bk = bk;
  res.threads = threads;
  res.fuse_type = fuse_type;
  /* setup parallelization strategy */
  /* Hand-tuned team shapes for the thread counts used by the benchmark. */
  res.fwd_M_hyperpartitions = 1;
  res.fwd_N_hyperpartitions = 1;
  if (threads == 16) {
    res.fwd_bf = 1;
    res.fwd_2d_blocking = 1;
    res.fwd_col_teams = 2;
    res.fwd_row_teams = 8;
  } else if (threads == 14) {
    res.fwd_bf = 1;
    res.fwd_2d_blocking = 1;
    res.fwd_col_teams = 2;
    res.fwd_row_teams = 7;
  } else if (threads == 56) {
    res.fwd_bf = 1;
    res.fwd_2d_blocking = 1;
    res.fwd_col_teams = 1;
    res.fwd_row_teams = 14;
    res.fwd_M_hyperpartitions = 1;
    res.fwd_N_hyperpartitions = 4;
  } else if (threads == 1) {
    res.fwd_bf = 1;
    res.fwd_2d_blocking = 1;
    res.fwd_col_teams = 1;
    res.fwd_row_teams = 1;
    res.fwd_M_hyperpartitions = 1;
    res.fwd_N_hyperpartitions = 1;
  } else {
    res.fwd_bf = 1;
    res.fwd_2d_blocking = 0;
    res.fwd_col_teams = 1;
    res.fwd_row_teams = 1;
  }
#if 0
  /* Disabled: environment-variable overrides for manual tuning. */
  res.fwd_bf = atoi(getenv("FWD_BF"));
  res.fwd_2d_blocking = atoi(getenv("FWD_2D_BLOCKING"));
  res.fwd_col_teams = atoi(getenv("FWD_COL_TEAMS"));
  res.fwd_row_teams = atoi(getenv("FWD_ROW_TEAMS"));
#endif
  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);
  /* TPP creation */
  /* AMX tile config is managed by the caller, hence NO_SETUP/NO_RESET flags. */
  l_flags = ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG;
  l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') );
  unroll_hint = (res.C/res.bc)/res.fwd_bf;
  /* Batch-reduce GEMM, fp32 accumulation, beta=1 (accumulate). */
  res.gemm_fwd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &beta, &l_flags, NULL);
  if ( res.gemm_fwd == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd failed. Bailing...!\n");
    exit(-1);
  }
  /* bf16 output, beta=1. */
  res.gemm_fwd2 = libxsmm_bmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &beta, &l_flags, NULL);
  if ( res.gemm_fwd2 == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd2 failed. Bailing...!\n");
    exit(-1);
  }
  /* bf16 output, beta=0 (overwrite). */
  res.gemm_fwd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL);
  if ( res.gemm_fwd3 == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd3 failed. Bailing...!\n");
    exit(-1);
  }
  /* Also JIT eltwise TPPs... */
  res.fwd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY);
  if ( res.fwd_cvtfp32bf16_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP fwd_cvtfp32bf16_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* fp32->bf16 convert fused with ReLU; emits a bitmask for the backward pass. */
  res.fwd_cvtfp32bf16_relu_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK, LIBXSMM_MELTW_TYPE_UNARY_RELU);
  if ( res.fwd_cvtfp32bf16_relu_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP fwd_cvtfp32bf16_relu_kernel failed. Bailing...!\n");
    exit(-1);
  }
  res.fwd_sigmoid_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_SIGMOID);
  if ( res.fwd_sigmoid_cvtfp32bf16_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP fwd_sigmoid_cvtfp32bf16_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* XOR-with-self kernel: zero-fills one bn*bk tile. */
  res.fwd_zero_kernel = libxsmm_dispatch_meltw_unary(bn*bk, 1, &ld_zero, &ld_zero, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR);
  if ( res.fwd_zero_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP fwd_zero_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* Column-broadcast copies used to seed the output tile with the bias row. */
  res.fwd_colbcast_bf16fp32_copy_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &ldc, &ldc, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_BCAST_COL, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY );
  if ( res.fwd_colbcast_bf16fp32_copy_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP fwd_colbcast_bf16fp32_copy_kernel failed. Bailing...!\n");
    exit(-1);
  }
  res.fwd_colbcast_bf16bf16_copy_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &ldc, &ldc, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BCAST_COL, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY);
  if ( res.fwd_colbcast_bf16bf16_copy_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP fwd_colbcast_bf16bf16_copy_kernel failed. Bailing...!\n");
    exit(-1);
  }
  res.fwd_relu_kernel = libxsmm_dispatch_meltw_unary(res.bc, res.bn, &ldb, &ldb, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK, LIBXSMM_MELTW_TYPE_UNARY_RELU);
  if ( res.fwd_relu_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP fwd_relu_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* Upconvert one bf16 row of length K to fp32 (used for accuracy checks). */
  res.fwd_copy_bf16fp32_kernel = libxsmm_dispatch_meltw_unary(K, 1, &ld_upconvert, &ld_upconvert, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY);
  if ( res.fwd_copy_bf16fp32_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP fwd_copy_bf16fp32_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* init scratch */
  res.scratch_size = sizeof(float) *  LIBXSMM_MAX(res.K * res.N, res.threads * LIBXSMM_MAX(res.bk * res.bn, res.K));
  return res;
}

/* Build a backward/update FC configuration (thread decomposition, barrier,
 * BWD-data and UPD BRGEMMs, converters, transpose/VNNI transforms, scratch
 * sizing).  Exits the process if any JIT dispatch fails. */
my_fc_bwd_config setup_my_fc_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type) {
  my_fc_bwd_config res;
  libxsmm_blasint lda = bk;
  libxsmm_blasint ldb = bc;
  libxsmm_blasint ldc = bk;
  libxsmm_blasint ld_zero_bwd = bc*bn;
  libxsmm_blasint ld_zero_upd = bk;
  libxsmm_blasint delbias_K = K;
  libxsmm_blasint delbias_N = N;
  float alpha = 1.0f;
  float beta = 1.0f;
  float zerobeta = 0.0f;
  libxsmm_blasint updM;
  libxsmm_blasint updN;
  int l_flags, l_tc_flags;
  int l_tr_flags = LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') );
  libxsmm_blasint unroll_hint;
  size_t size_bwd_scratch;
  size_t size_upd_scratch;
  libxsmm_blasint bbk;
  libxsmm_blasint bbc;
  libxsmm_blasint ldaT = bc;
  libxsmm_blasint ldb_orig= bc;
  /* setting up some handle values */
  res.N = N;
  res.C = C;
  res.K = K;
  res.bn = bn;
  res.bc = bc;
  res.bk = bk;
  res.threads = threads;
  res.fuse_type = fuse_type;
  /* setup parallelization strategy */
  res.bwd_M_hyperpartitions = 1;
  res.upd_M_hyperpartitions = 1;
  res.bwd_N_hyperpartitions = 1;
  res.upd_N_hyperpartitions = 1;
  if (threads == 16) {
    res.bwd_bf = 1;
    res.bwd_2d_blocking = 1;
    res.bwd_col_teams = 2;
    res.bwd_row_teams = 8;
    res.upd_bf = 1;
    res.upd_2d_blocking = 1;
    res.upd_col_teams = 2;
    res.upd_row_teams = 8;
    res.ifm_subtasks = 1;
    res.ofm_subtasks = 1;
  } else if (threads == 14) {
    res.bwd_bf = 1;
res.bwd_2d_blocking = 1; res.bwd_col_teams = 2; res.bwd_row_teams = 7; res.upd_bf = 1; res.upd_2d_blocking = 1; res.upd_col_teams = 2; res.upd_row_teams = 7; res.ifm_subtasks = 1; res.ofm_subtasks = 1; } else if (threads == 56) { res.bwd_bf = 1; res.bwd_2d_blocking = 1; res.bwd_col_teams = 1; res.bwd_row_teams = 14; res.bwd_M_hyperpartitions = 1; res.bwd_N_hyperpartitions = 4; res.upd_bf = 1; res.upd_2d_blocking = 1; res.upd_col_teams = 1; res.upd_row_teams = 14; res.upd_M_hyperpartitions = 1; res.upd_N_hyperpartitions = 4; res.ifm_subtasks = 1; res.ofm_subtasks = 1; } else if (threads == 1) { res.bwd_bf = 1; res.bwd_2d_blocking = 1; res.bwd_col_teams = 1; res.bwd_row_teams = 1; res.bwd_M_hyperpartitions = 1; res.bwd_N_hyperpartitions = 1; res.upd_bf = 1; res.upd_2d_blocking = 1; res.upd_col_teams = 1; res.upd_row_teams = 1; res.upd_M_hyperpartitions = 1; res.upd_N_hyperpartitions = 1; res.ifm_subtasks = 1; res.ofm_subtasks = 1; } else { res.bwd_bf = 1; res.bwd_2d_blocking = 0; res.bwd_col_teams = 1; res.bwd_row_teams = 1; res.upd_bf = 1; res.upd_2d_blocking = 0; res.upd_col_teams = 1; res.upd_row_teams = 1; res.ifm_subtasks = 1; res.ofm_subtasks = 1; } bbk = (res.upd_2d_blocking == 1) ? bk : bk/res.ofm_subtasks; bbc = (res.upd_2d_blocking == 1) ? 
bc : bc/res.ifm_subtasks; #if 0 res.bwd_bf = atoi(getenv("BWD_BF")); res.bwd_2d_blocking = atoi(getenv("BWD_2D_BLOCKING")); res.bwd_col_teams = atoi(getenv("BWD_COL_TEAMS")); res.bwd_row_teams = atoi(getenv("BWD_ROW_TEAMS")); res.upd_bf = atoi(getenv("UPD_BF")); res.upd_2d_blocking = atoi(getenv("UPD_2D_BLOCKING")); res.upd_col_teams = atoi(getenv("UPD_COL_TEAMS")); res.upd_row_teams = atoi(getenv("UPD_ROW_TEAMS")); res.ifm_subtasks = atoi(getenv("IFM_SUBTASKS")); res.ofm_subtasks = atoi(getenv("OFM_SUBTASKS")); #endif /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* TPP creation */ /* BWD GEMM */ l_flags = ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG; l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ); unroll_hint = (res.K/res.bk)/res.bwd_bf; res.gemm_bwd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bk*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &beta, &l_flags, NULL); if ( res.gemm_bwd == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd failed. Bailing...!\n"); exit(-1); } res.gemm_bwd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bk*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &zerobeta, &l_flags, NULL); if ( res.gemm_bwd2 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd2 failed. Bailing...!\n"); exit(-1); } res.gemm_bwd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bk*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &zerobeta, &l_flags, NULL); if ( res.gemm_bwd3 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd3 failed. Bailing...!\n"); exit(-1); } /* Also JIT eltwise TPPs... 
*/ res.bwd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bc, res.bn, &ldb, &ldb, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY); if ( res.bwd_cvtfp32bf16_kernel == NULL ) { fprintf( stderr, "JIT for TPP bwd_cvtfp32bf16_kernel failed. Bailing...!\n"); exit(-1); } res.bwd_relu_kernel = libxsmm_dispatch_meltw_unary(res.bc, res.bn,&ldb, &ldb, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK, LIBXSMM_MELTW_TYPE_UNARY_RELU_INV); if ( res.bwd_relu_kernel == NULL ) { fprintf( stderr, "JIT for TPP bwd_relu_kernel failed. Bailing...!\n"); exit(-1); } res.bwd_zero_kernel = libxsmm_dispatch_meltw_unary(bn*bc, 1, &ld_zero_bwd, &ld_zero_bwd, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR); if ( res.bwd_zero_kernel == NULL ) { fprintf( stderr, "JIT for TPP bwd_zero_kernel failed. Bailing...!\n"); exit(-1); } /* JITing the tranpose kernel */ res.vnni_to_vnniT_kernel = libxsmm_dispatch_meltw_unary(bk, bc, &lda, &ldaT, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_VNNI_TO_VNNIT); if ( res.vnni_to_vnniT_kernel == NULL ) { fprintf( stderr, "JIT for TPP vnni_to_vnniT_kernel failed. 
Bailing...!\n"); exit(-1); } /* UPD GEMM */ lda = res.bk; ldb = res.bn; ldc = res.bk; updM = res.bk/res.ofm_subtasks; updN = res.bc/res.ifm_subtasks; l_flags = ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG; l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ); unroll_hint = (res.N/res.bn)/res.upd_bf; res.gemm_upd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk*res.bn*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &beta, &l_flags, NULL); if ( res.gemm_upd == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_upd failed. Bailing...!\n"); exit(-1); } res.gemm_upd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk*res.bn*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL); if ( res.gemm_upd3 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_upd3 failed. Bailing...!\n"); exit(-1); } /* Also JIT eltwise TPPs... */ res.upd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(bbk, bbc, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY); if ( res.upd_cvtfp32bf16_kernel == NULL ) { fprintf( stderr, "JIT for TPP upd_cvtfp32bf16_kernel failed. Bailing...!\n"); exit(-1); } res.upd_zero_kernel = libxsmm_dispatch_meltw_unary(bbk, bbc, &ld_zero_upd, &ld_zero_upd, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR); if ( res.upd_zero_kernel == NULL ) { fprintf( stderr, "JIT for TPP upd_zero_kernel failed. 
Bailing...!\n"); exit(-1); } res.delbias_reduce_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &delbias_K, &delbias_N, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD_NCNC_FORMAT); if ( res.delbias_reduce_kernel == NULL ) { fprintf( stderr, "JIT for TPP delbias_reduce_kernel failed. Bailing...!\n"); exit(-1); } /* JITing the tranpose kernels */ res.norm_to_vnni_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &lda, &lda, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI); if ( res.norm_to_vnni_kernel == NULL ) { fprintf( stderr, "JIT for TPP norm_to_vnni_kernel failed. Bailing...!\n"); exit(-1); } res.upd_norm_to_vnni_kernel = libxsmm_dispatch_meltw_unary(bk, bc, &lda, &lda, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI); if ( res.upd_norm_to_vnni_kernel == NULL ) { fprintf( stderr, "JIT for TPP upd_norm_to_vnni_kernel failed. Bailing...!\n"); exit(-1); } res.norm_to_vnni_kernel_wt = libxsmm_dispatch_meltw_unary(bbk, bbc, &ldc, &ldc, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI); if ( res.norm_to_vnni_kernel_wt == NULL ) { fprintf( stderr, "JIT for TPP norm_to_vnni_kernel failed. Bailing...!\n"); exit(-1); } res.norm_to_normT_kernel = libxsmm_dispatch_meltw_unary(bc, bn, &ldb, &ldb_orig, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_NORMT); if ( res.norm_to_normT_kernel == NULL ) { fprintf( stderr, "JIT for TPP norm_to_normT_kernel failed. 
Bailing...!\n"); exit(-1); }

  /* init scratch: size one arena shared by the BWD and UPD passes.
     NOTE(review): this is the tail of a setup routine whose beginning lies
     before this chunk; all executable tokens are preserved verbatim. */
  size_bwd_scratch = sizeof(float) * LIBXSMM_MAX(res.C * res.N, res.threads * res.bc * res.bn) + sizeof(libxsmm_bfloat16) * res.C * res.K;
  size_upd_scratch = sizeof(float) * LIBXSMM_MAX(res.C * res.K, res.threads * res.bc * res.bk) + sizeof(libxsmm_bfloat16) * res.threads * res.bk * res.bc + sizeof(libxsmm_bfloat16) * (res.N * (res.C + res.K));
#ifdef OVERWRITE_DOUTPUT_BWDUPD
  /* doutput may be overwritten in place: one extra N*K bf16 buffer suffices */
  res.scratch_size = LIBXSMM_MAX(size_bwd_scratch, size_upd_scratch) + sizeof(libxsmm_bfloat16) * res.N * res.K;
#else
  /* a separate (possibly ReLU-masked) copy of doutput is kept: two N*K bf16 buffers */
  res.scratch_size = LIBXSMM_MAX(size_bwd_scratch, size_upd_scratch) + 2 * sizeof(libxsmm_bfloat16) * res.N * res.K;
#endif
  /* byte offset inside scratch at which the doutput copies start (see my_fc_bwd_exec) */
  res.doutput_scratch_mark = LIBXSMM_MAX(size_bwd_scratch, size_upd_scratch) ;
  return res;
}

/* Creates the optimizer configuration: records the C x K weight shape and its
   bc x bk blocking, the thread count and the learning rate, and allocates the
   thread barrier. No scratch is needed. */
my_opt_config setup_my_opt(libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, float lr) {
  my_opt_config res;

  /* setting up some handle values */
  res.C = C;
  res.K = K;
  res.bc = bc;
  res.bk = bk;
  res.threads = threads;
  res.lr = lr;

  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);

  /* init scratch */
  res.scratch_size = 0;

  return res;
}

/* Creates the softmax forward configuration: records the N x C activation
   shape and its bn x bc blocking plus the thread count, allocates the barrier
   and sizes an f32 scratch able to hold two C*N activation copies. */
my_smax_fwd_config setup_my_smax_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint threads) {
  my_smax_fwd_config res;

  /* setting up some handle values */
  res.C = C;
  res.N = N;
  res.bc = bc;
  res.bn = bn;
  res.threads = threads;

  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);

  /* init scratch (two f32 copies of the C x N activations; stray ';;' kept verbatim) */
  res.scratch_size = (sizeof(float)*res.C*res.N*2);;

  return res;
}

/* Creates the softmax backward configuration: same bookkeeping as the forward
   variant plus the loss scaling factor applied during backprop. */
my_smax_bwd_config setup_my_smax_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint threads, float loss_weight) {
  my_smax_bwd_config res;

  /* setting up some handle values */
  res.C = C;
  res.N = N;
  res.bc = bc;
  res.bn = bn;
  res.threads = threads;
  res.loss_weight = loss_weight;

  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);

  /* init scratch */
  res.scratch_size = (sizeof(float)*res.C*res.N*2);

  return res;
}

/* Forward pass of one fully-connected layer (bf16 data, f32 accumulation):
   out = W * in, with bias broadcast and/or ReLU (plus ReLU bitmask output)
   fused in according to cfg.fuse_type. Thread my_tid (relative to start_tid)
   works on its share of the (nBlocksMB x nBlocksOFm) block grid; `scratch`
   supplies the f32 accumulator used when the IFM reduction is split
   (cfg.fwd_bf > 1). */
void my_fc_fwd_exec( my_fc_fwd_config cfg, const libxsmm_bfloat16* wt_ptr, const libxsmm_bfloat16* in_act_ptr, libxsmm_bfloat16* out_act_ptr, const libxsmm_bfloat16* bias_ptr, unsigned char* relu_ptr, int start_tid, int my_tid, void* scratch ) {
  const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc;
  const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk;
  const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn;
  const libxsmm_blasint bn = cfg.bn;
  const libxsmm_blasint bk = cfg.bk;
  /* bf16 weights use a VNNI layout: lpb consecutive input rows are interleaved */
  const libxsmm_blasint lpb = 2;
  const libxsmm_blasint bc_lp = cfg.bc/lpb;
  /* const libxsmm_blasint bc = cfg.bc;*/
  libxsmm_blasint use_2d_blocking = cfg.fwd_2d_blocking;

  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  /* number of tasks that could be run in parallel */
  const libxsmm_blasint work = nBlocksOFm * nBlocksMB;
  /* compute chunk size */
  const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
  const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ?
((ltid + 1) * chunksize) : work;
  /* loop variables */
  libxsmm_blasint mb1ofm1 = 0, mb1 = 0, ofm1 = 0, ifm1 = 0;
  libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0;

  /* logical multi-dimensional views (VLA macros) on the flat buffers */
  LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, output, out_act_ptr, nBlocksOFm, cfg.bn, cfg.bk);
  LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, input, in_act_ptr, nBlocksIFm, cfg.bn, cfg.bc);
  LIBXSMM_VLA_DECL(5, const libxsmm_bfloat16, filter, wt_ptr, nBlocksIFm, bc_lp, cfg.bk, lpb);
  /* f32 accumulator view lives in scratch (used when cfg.fwd_bf > 1) */
  LIBXSMM_VLA_DECL(4, float, output_f32, (float*)scratch, nBlocksOFm, bn, bk);
  libxsmm_meltw_gemm_param gemm_eltwise_params;
  /* NOTE(review): gemm_eltwise_params and fp32_bias_scratch are not referenced
     below -- presumably leftovers from a fused-GEMM variant; confirm before removal. */
  float* fp32_bias_scratch = ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (float*)scratch + ltid * cfg.K : NULL;
  LIBXSMM_VLA_DECL(2, const libxsmm_bfloat16, bias, ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (libxsmm_bfloat16*) bias_ptr : NULL, cfg.bk);
  /* one mask bit per output element records which activations survived ReLU */
  LIBXSMM_VLA_DECL(4, __mmask32, relubitmask, ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? (__mmask32*)relu_ptr : NULL, nBlocksOFm, cfg.bn, cfg.bk/32);

  libxsmm_meltwfunction_unary eltwise_kernel_act = cfg.fwd_cvtfp32bf16_relu_kernel;
  libxsmm_meltw_unary_param eltwise_params_act;
  libxsmm_meltwfunction_unary eltwise_kernel = cfg.fwd_cvtfp32bf16_kernel;
  libxsmm_meltw_unary_param eltwise_params;
  libxsmm_meltw_unary_param copy_params;
  libxsmm_meltw_unary_param relu_params;
  libxsmm_meltwfunction_unary relu_kernel = cfg.fwd_relu_kernel;
  /* with fused bias pick gemm_fwd2, otherwise gemm_fwd3 -- presumably the
     accumulate-into-C vs. overwrite-C BRGEMM variants; confirm against setup */
  libxsmm_bmmfunction_reducebatch_strd gemm_kernel = ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ?
cfg.gemm_fwd2 : cfg.gemm_fwd3;

  unsigned long long blocks = nBlocksIFm;
  libxsmm_blasint CB_BLOCKS = nBlocksIFm, BF = 1;

  /* split the reduction over input-feature blocks into BF chunks of CB_BLOCKS */
  BF = cfg.fwd_bf;
  CB_BLOCKS = nBlocksIFm/BF;
  blocks = CB_BLOCKS;

  if (use_2d_blocking == 1) {
    /* carve the (MB x OFM) block grid into hyperpartitions, each served by a
       col_teams x row_teams grid of threads */
    int _ltid, M_hyperpartition_id, N_hyperpartition_id, _nBlocksOFm, _nBlocksMB, hyperteam_id;
    col_teams = cfg.fwd_col_teams;
    row_teams = cfg.fwd_row_teams;
    hyperteam_id = ltid/(col_teams*row_teams);
    _nBlocksOFm = nBlocksOFm/cfg.fwd_M_hyperpartitions;
    _nBlocksMB = nBlocksMB/cfg.fwd_N_hyperpartitions;
    _ltid = ltid % (col_teams * row_teams);
    M_hyperpartition_id = hyperteam_id % cfg.fwd_M_hyperpartitions;
    N_hyperpartition_id = hyperteam_id / cfg.fwd_M_hyperpartitions;
    my_row_id = _ltid % row_teams;
    my_col_id = _ltid / row_teams;
    N_tasks_per_thread = (_nBlocksMB + col_teams-1)/col_teams;
    M_tasks_per_thread = (_nBlocksOFm + row_teams-1)/row_teams;
    my_N_start = N_hyperpartition_id * _nBlocksMB + LIBXSMM_MIN( my_col_id * N_tasks_per_thread, _nBlocksMB);
    my_N_end = N_hyperpartition_id * _nBlocksMB + LIBXSMM_MIN( (my_col_id+1) * N_tasks_per_thread, _nBlocksMB);
    my_M_start = M_hyperpartition_id * _nBlocksOFm + LIBXSMM_MIN( my_row_id * M_tasks_per_thread, _nBlocksOFm);
    my_M_end = M_hyperpartition_id * _nBlocksOFm + LIBXSMM_MIN( (my_row_id+1) * M_tasks_per_thread, _nBlocksOFm);
  }

  /* lazy barrier init */
  libxsmm_barrier_init(cfg.barrier, ltid);

  if (use_2d_blocking == 1) {
    if (BF > 1) {
      /* blocked reduction: seed the f32 accumulator (bias or zero) on the first
         IFM chunk, downconvert (+ReLU) to bf16 on the last */
      for ( ifm1 = 0; ifm1 < BF; ++ifm1 ) {
        for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
          for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
            if ( ifm1 == 0 ) {
              if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
                copy_params.in.primary = (void*) &LIBXSMM_VLA_ACCESS(2, bias, ofm1, 0,cfg.bk);
                copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm,cfg.bn,cfg.bk);
                cfg.fwd_colbcast_bf16fp32_copy_kernel(&copy_params);
              } else {
                copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
                cfg.fwd_zero_kernel(&copy_params);
              }
            }
            cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1*CB_BLOCKS, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
                          &LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
                          &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
            if ( ifm1 == BF-1 ) {
              if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
                eltwise_params_act.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
                eltwise_params_act.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
                eltwise_params_act.out.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
                eltwise_kernel_act(&eltwise_params_act);
              } else {
                eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
                eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
                eltwise_kernel(&eltwise_params);
              }
            }
          }
        }
      }
    } else {
      /* unblocked reduction: GEMM writes bf16 output directly, ReLU applied in place */
      for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
        for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
          if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
            copy_params.in.primary = (void*) &LIBXSMM_VLA_ACCESS(2, bias, ofm1, 0,cfg.bk);
            copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk);
            cfg.fwd_colbcast_bf16bf16_copy_kernel(&copy_params);
          }
          gemm_kernel( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
                       &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
                       &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks);
          if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
            relu_params.in.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
            relu_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
            relu_params.out.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
            relu_kernel(&relu_params);
          }
        }
      }
    }
  } else {
    /* 1D work partition: flatten the (MB x OFM) grid and hand out contiguous chunks */
    if (BF > 1) {
      for ( ifm1 = 0; ifm1 < BF; ++ifm1 ) {
        for ( mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1 ) {
          mb1 = mb1ofm1%nBlocksMB;
          ofm1 = mb1ofm1/nBlocksMB;
          /* Initialize intermediate f32 tensor */
          if ( ifm1 == 0 ) {
            if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
              copy_params.in.primary = (void*) &LIBXSMM_VLA_ACCESS(2, bias, ofm1, 0,cfg.bk);
              copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm,cfg.bn,cfg.bk);
              cfg.fwd_colbcast_bf16fp32_copy_kernel(&copy_params);
            } else {
              copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              cfg.fwd_zero_kernel(&copy_params);
            }
          }
          cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1*CB_BLOCKS, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
                        &LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
                        &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
          if ( ifm1 == BF-1 ) {
            if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
              eltwise_params_act.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              eltwise_params_act.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              eltwise_params_act.out.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
              eltwise_kernel_act(&eltwise_params_act);
            } else {
              eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              eltwise_kernel(&eltwise_params);
            }
          }
        }
      }
    } else {
      for ( mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1 ) {
        mb1 = mb1ofm1%nBlocksMB;
        ofm1 =
mb1ofm1/nBlocksMB;
        if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
          copy_params.in.primary = (void*) &LIBXSMM_VLA_ACCESS(2, bias, ofm1, 0,cfg.bk);
          copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk);
          cfg.fwd_colbcast_bf16bf16_copy_kernel(&copy_params);
        }
        gemm_kernel( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
                     &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
                     &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks);
        if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
          relu_params.in.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
          relu_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
          relu_params.out.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
          relu_kernel(&relu_params);
        }
      }
    }
  }
  libxsmm_barrier_wait(cfg.barrier, ltid);
}

/* Backward pass of one fully-connected layer (bf16 data, f32 accumulation).
   Depending on `pass` it computes the input gradient (MY_PASS_BWD_D:
   din = W^T * dout) and/or the weight gradient (MY_PASS_BWD_W:
   dwt = dout^T * in), with ReLU backprop (bitmask from the fwd pass) and a
   bias-gradient reduction optionally fused in via cfg.fuse_type. `scratch`
   holds transposed weights/activations and f32 accumulators. */
void my_fc_bwd_exec( my_fc_bwd_config cfg, const libxsmm_bfloat16* wt_ptr, libxsmm_bfloat16* din_act_ptr, const libxsmm_bfloat16* dout_act_ptr, libxsmm_bfloat16* dwt_ptr, const libxsmm_bfloat16* in_act_ptr, libxsmm_bfloat16* dbias_ptr, const unsigned char* relu_ptr, my_pass pass, int start_tid, int my_tid, void* scratch ) {
  /* size variables, all const */
  /* here we assume that input and output blocking is similar */
  const libxsmm_blasint bn = cfg.bn;
  const libxsmm_blasint bk = cfg.bk;
  const libxsmm_blasint bc = cfg.bc;
  /* VNNI pairing factor for bf16 */
  libxsmm_blasint lpb = 2;
  const libxsmm_blasint bc_lp = bc/lpb;
  const libxsmm_blasint bk_lp = bk/lpb;
  const libxsmm_blasint bn_lp = bn/lpb;
  const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc;
  const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk;
  const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn;
  libxsmm_blasint mb1ofm1 = 0, mb1 = 0, ofm1 = 0, ofm2 = 0;
  /* set when the UPD transpose of doutput was already done in the ReLU loop */
  libxsmm_blasint performed_doutput_transpose = 0;
  libxsmm_meltw_unary_param trans_param;

  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;

  /* number of eltwise (ReLU backprop) tasks that could be run in parallel */
  const libxsmm_blasint eltwise_work = nBlocksOFm * nBlocksMB;
  /* compute chunk size */
  const libxsmm_blasint eltwise_chunksize = (eltwise_work % cfg.threads == 0) ? (eltwise_work / cfg.threads) : ((eltwise_work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint eltwise_thr_begin = (ltid * eltwise_chunksize < eltwise_work) ? (ltid * eltwise_chunksize) : eltwise_work;
  const libxsmm_blasint eltwise_thr_end = ((ltid + 1) * eltwise_chunksize < eltwise_work) ? ((ltid + 1) * eltwise_chunksize) : eltwise_work;

  /* number of dbias-reduction tasks that could be run in parallel */
  const libxsmm_blasint dbias_work = nBlocksOFm;
  /* compute chunk size */
  const libxsmm_blasint dbias_chunksize = (dbias_work % cfg.threads == 0) ? (dbias_work / cfg.threads) : ((dbias_work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint dbias_thr_begin = (ltid * dbias_chunksize < dbias_work) ? (ltid * dbias_chunksize) : dbias_work;
  const libxsmm_blasint dbias_thr_end = ((ltid + 1) * dbias_chunksize < dbias_work) ? ((ltid + 1) * dbias_chunksize) : dbias_work;

  LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, dbias, ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (libxsmm_bfloat16*) dbias_ptr : NULL, cfg.bk);
  LIBXSMM_VLA_DECL(4, __mmask32, relubitmask, ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? (__mmask32*)relu_ptr : NULL, nBlocksOFm, cfg.bn, cfg.bk/32);

#ifdef OVERWRITE_DOUTPUT_BWDUPD
  /* ReLU backprop overwrites dout_act_ptr in place; transposed copy goes behind
     doutput_scratch_mark when ReLU is fused */
  libxsmm_bfloat16 *grad_output_ptr = (libxsmm_bfloat16*)dout_act_ptr;
  libxsmm_bfloat16 *tr_doutput_ptr = (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) ? (libxsmm_bfloat16*)((char*)scratch + cfg.doutput_scratch_mark) : (libxsmm_bfloat16*)scratch;
#else
  /* dout_act_ptr stays intact; the ReLU-masked gradient lives in scratch */
  libxsmm_bfloat16 *grad_output_ptr = (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) ? (libxsmm_bfloat16*)((char*)scratch + cfg.doutput_scratch_mark) : (libxsmm_bfloat16*)dout_act_ptr;
  libxsmm_bfloat16 *tr_doutput_ptr = (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) ? (libxsmm_bfloat16*)grad_output_ptr + cfg.N * cfg.K : (libxsmm_bfloat16*)scratch;
#endif
  LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, doutput_orig, (libxsmm_bfloat16*)dout_act_ptr, nBlocksOFm, bn, bk);
  libxsmm_meltw_unary_param relu_params;
  libxsmm_meltwfunction_unary relu_kernel = cfg.bwd_relu_kernel;
  LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, doutput, grad_output_ptr, nBlocksOFm, bn, bk);
  /* VNNI-transposed doutput, consumed by the UPD BRGEMM */
  LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, doutput_tr, tr_doutput_ptr, nBlocksMB, bn_lp, bk, lpb);

  libxsmm_meltwfunction_unary eltwise_kernel = cfg.bwd_cvtfp32bf16_kernel;
  libxsmm_meltwfunction_unary eltwise_kernel2 = cfg.upd_cvtfp32bf16_kernel;
  libxsmm_meltw_unary_param eltwise_params;
  libxsmm_meltw_unary_param copy_params;
  libxsmm_meltw_unary_param delbias_params;

  /* lazy barrier init */
  libxsmm_barrier_init(cfg.barrier, ltid);

  /* Apply to doutput potential fusions */
  if (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) {
    for ( mb1ofm1 = eltwise_thr_begin; mb1ofm1 < eltwise_thr_end; ++mb1ofm1 ) {
      mb1 = mb1ofm1/nBlocksOFm;
      ofm1 = mb1ofm1%nBlocksOFm;

      /* zero gradient entries whose fwd activation was clipped by ReLU */
      relu_params.in.primary =(void*) &LIBXSMM_VLA_ACCESS(4, doutput_orig, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
      relu_params.out.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
      relu_params.in.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
      relu_kernel(&relu_params);

      /* If in UPD pass, also perform transpose of doutput */
      if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) {
        trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk);
        trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, mb1, 0, 0, 0, nBlocksMB, bn_lp, bk, lpb);
        cfg.norm_to_vnni_kernel(&trans_param);
      }
    }
    if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) {
      performed_doutput_transpose = 1;
    }
    libxsmm_barrier_wait(cfg.barrier, ltid);
  }

  /* Accumulation of bias happens in f32 */
  if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS)) {
    for ( ofm1 = dbias_thr_begin; ofm1 < dbias_thr_end; ++ofm1 ) {
      delbias_params.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, 0, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
      delbias_params.out.primary = &LIBXSMM_VLA_ACCESS(2, dbias, ofm1, 0, cfg.bk);
      cfg.delbias_reduce_kernel(&delbias_params);
    }
    /* wait for eltwise to finish */
    libxsmm_barrier_wait(cfg.barrier, ltid);
  }

  if ( (pass & MY_PASS_BWD_D) == MY_PASS_BWD_D ){
    libxsmm_blasint use_2d_blocking = cfg.bwd_2d_blocking;

    /* number of tasks that could be run in parallel */
    const libxsmm_blasint work = nBlocksIFm * nBlocksMB;
    /* compute chunk size */
    const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
    /* compute thr_begin and thr_end */
    const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
    const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;

    /* number of tasks for transpose that could be run in parallel */
    const libxsmm_blasint transpose_work = nBlocksIFm * nBlocksOFm;
    /* compute chunk size */
    const libxsmm_blasint transpose_chunksize = (transpose_work % cfg.threads == 0) ? (transpose_work / cfg.threads) : ((transpose_work / cfg.threads) + 1);
    /* compute thr_begin and thr_end */
    const libxsmm_blasint transpose_thr_begin = (ltid * transpose_chunksize < transpose_work) ? (ltid * transpose_chunksize) : transpose_work;
    const libxsmm_blasint transpose_thr_end = ((ltid + 1) * transpose_chunksize < transpose_work) ? ((ltid + 1) * transpose_chunksize) : transpose_work;

    /* loop variables */
    libxsmm_blasint ifm1 = 0, ifm1ofm1 = 0, mb1ifm1 = 0;
    libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0;

    LIBXSMM_VLA_DECL(5, const libxsmm_bfloat16, filter, (libxsmm_bfloat16*)wt_ptr, nBlocksIFm, bc_lp, bk, lpb);
    LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, dinput, (libxsmm_bfloat16* )din_act_ptr, nBlocksIFm, bn, bc);
    /* transposed weights at the start of scratch; f32 accumulator right after
       (C*K bf16 elements == (C*K)/2 floats) */
    LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, filter_tr, (libxsmm_bfloat16*)scratch, nBlocksOFm, bk_lp, bc, lpb);
    float* temp_output = (float*)scratch + (cfg.C * cfg.K)/2;
    LIBXSMM_VLA_DECL(4, float, dinput_f32, (float*) temp_output, nBlocksIFm, bn, bc);

    unsigned long long blocks = nBlocksOFm;
    libxsmm_blasint KB_BLOCKS = nBlocksOFm, BF = 1;
    /* split the reduction over output-feature blocks into BF chunks */
    BF = cfg.bwd_bf;
    KB_BLOCKS = nBlocksOFm/BF;
    blocks = KB_BLOCKS;

    if (use_2d_blocking == 1) {
      /* 2D thread decomposition over the (MB x IFM) block grid */
      int _ltid, M_hyperpartition_id, N_hyperpartition_id, _nBlocksIFm, _nBlocksMB, hyperteam_id;
      col_teams = cfg.bwd_col_teams;
      row_teams = cfg.bwd_row_teams;
      hyperteam_id = ltid/(col_teams*row_teams);
      _nBlocksIFm = nBlocksIFm/cfg.bwd_M_hyperpartitions;
      _nBlocksMB = nBlocksMB/cfg.bwd_N_hyperpartitions;
      _ltid = ltid % (col_teams * row_teams);
      M_hyperpartition_id = hyperteam_id % cfg.bwd_M_hyperpartitions;
      N_hyperpartition_id = hyperteam_id / cfg.bwd_M_hyperpartitions;
      my_row_id = _ltid % row_teams;
      my_col_id = _ltid / row_teams;
      N_tasks_per_thread = (_nBlocksMB + col_teams-1)/col_teams;
      M_tasks_per_thread = (_nBlocksIFm + row_teams-1)/row_teams;
      my_N_start = N_hyperpartition_id * _nBlocksMB + LIBXSMM_MIN( my_col_id * N_tasks_per_thread, _nBlocksMB);
      my_N_end = N_hyperpartition_id * _nBlocksMB + LIBXSMM_MIN( (my_col_id+1) * N_tasks_per_thread, _nBlocksMB);
      my_M_start = M_hyperpartition_id * _nBlocksIFm + LIBXSMM_MIN( my_row_id * M_tasks_per_thread, _nBlocksIFm);
      my_M_end = M_hyperpartition_id * _nBlocksIFm + LIBXSMM_MIN( (my_row_id+1) * M_tasks_per_thread, _nBlocksIFm);
    }

    /* transpose weight */
    for (ifm1ofm1 = transpose_thr_begin; ifm1ofm1 < transpose_thr_end; ++ifm1ofm1) {
      ofm1 = ifm1ofm1 / nBlocksIFm;
      ifm1 = ifm1ofm1 % nBlocksIFm;
      trans_param.in.primary = (void*)&LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb);
      trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, ofm1, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb);
      cfg.vnni_to_vnniT_kernel(&trans_param);
    }
    /* wait for transpose to finish */
    libxsmm_barrier_wait(cfg.barrier, ltid);

    if (use_2d_blocking == 1) {
      if (BF > 1) {
        /* blocked reduction: zero the f32 accumulator on the first OFM chunk,
           downconvert to bf16 on the last */
        for ( ofm1 = 0; ofm1 < BF; ++ofm1 ) {
          for (ifm1 = my_M_start; ifm1 < my_M_end; ++ifm1) {
            for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
              /* Initialize intermediate f32 tensor */
              if ( ofm1 == 0 ) {
                copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
                cfg.bwd_zero_kernel(&copy_params);
              }
              cfg.gemm_bwd( &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, ofm1*KB_BLOCKS, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb),
                            &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk),
                            &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
              /* downconvert intermediate f32 tensor to bf16 and store to final C */
              if ( ofm1 == BF-1 ) {
                eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
                eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
                eltwise_kernel(&eltwise_params);
              }
            }
          }
        }
      } else {
        /* single reduction chunk: BRGEMM writes bf16 dinput directly */
        for (ifm1 = my_M_start; ifm1 < my_M_end; ++ifm1) {
          for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
            cfg.gemm_bwd3( &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, 0, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb),
                           &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk),
                           &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
          }
        }
      }
    } else {
      /* 1D work partition over the flattened (MB x IFM) grid */
      if (BF > 1) {
        for ( ofm1 = 0; ofm1 < BF; ++ofm1 ) {
          for ( mb1ifm1 = thr_begin; mb1ifm1 <
thr_end; ++mb1ifm1 ) {
            mb1 = mb1ifm1%nBlocksMB;
            ifm1 = mb1ifm1/nBlocksMB;
            /* Initialize intermediate f32 tensor */
            if ( ofm1 == 0 ) {
              copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
              cfg.bwd_zero_kernel(&copy_params);
            }
            cfg.gemm_bwd( &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, ofm1*KB_BLOCKS, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb),
                          &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk),
                          &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
            /* downconvert intermediate f32 tensor to bf16 and store to final C */
            if ( ofm1 == BF-1 ) {
              eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
              eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
              eltwise_kernel(&eltwise_params);
            }
          }
        }
      } else {
        for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) {
          mb1 = mb1ifm1%nBlocksMB;
          ifm1 = mb1ifm1/nBlocksMB;
          cfg.gemm_bwd3( &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, 0, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb),
                         &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk),
                         &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
        }
      }
    }
    libxsmm_barrier_wait(cfg.barrier, ltid);
  }

  /* ---- weight-gradient (UPD) pass: dwt = dout^T * in ---- */
  if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) {
    /* number of tasks that could be run in parallel; each bc x bk filter block
       may be further split into ifm_subtasks x ofm_subtasks sub-tiles */
    const libxsmm_blasint ofm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ofm_subtasks;
    const libxsmm_blasint ifm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ifm_subtasks;
    const libxsmm_blasint bbk = (cfg.upd_2d_blocking == 1) ? bk : bk/ofm_subtasks;
    const libxsmm_blasint bbc = (cfg.upd_2d_blocking == 1) ? bc : bc/ifm_subtasks;
    const libxsmm_blasint work = nBlocksIFm * ifm_subtasks * nBlocksOFm * ofm_subtasks;
    const libxsmm_blasint Cck_work = nBlocksIFm * ifm_subtasks * ofm_subtasks;
    const libxsmm_blasint Cc_work = nBlocksIFm * ifm_subtasks;

    /* 2D blocking parameters */
    libxsmm_blasint use_2d_blocking = cfg.upd_2d_blocking;
    libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0;

    /* compute chunk size */
    const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
    /* compute thr_begin and thr_end */
    const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
    const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
    libxsmm_blasint BF = cfg.upd_bf;

    /* loop variables */
    libxsmm_blasint ifm1ofm1 = 0, ifm1 = 0, ifm2 = 0, bfn = 0, mb1ifm1 = 0;

    /* Batch reduce related variables */
    unsigned long long blocks = nBlocksMB/BF;

    LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, input, (libxsmm_bfloat16* )in_act_ptr, nBlocksIFm, bn, bc);
    LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, dfilter, (libxsmm_bfloat16*)dwt_ptr, nBlocksIFm, bc_lp, bk, lpb);

    /* Set up tensors for transposing/scratch before vnni reformatting dfilter */
    libxsmm_bfloat16 *tr_inp_ptr = (libxsmm_bfloat16*) ((libxsmm_bfloat16*)scratch + cfg.N * cfg.K);
    float *dfilter_f32_ptr = (float*) ((libxsmm_bfloat16*)tr_inp_ptr + cfg.N * cfg.C);
    LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, input_tr, (libxsmm_bfloat16*)tr_inp_ptr, nBlocksMB, bc, bn);
    LIBXSMM_VLA_DECL(4, float, dfilter_f32, (float*)dfilter_f32_ptr, nBlocksIFm, bc, bk);

    /* per-thread staging tile for the BF==1 path (VLA: bc*bk bf16 elements) */
    libxsmm_bfloat16 _tmp[bc*bk];

    /* work partition for the doutput transpose (if not already done above) */
    const libxsmm_blasint tr_out_work = nBlocksMB * nBlocksOFm;
    const libxsmm_blasint tr_out_chunksize = (tr_out_work % cfg.threads == 0) ? (tr_out_work / cfg.threads) : ((tr_out_work / cfg.threads) + 1);
    const libxsmm_blasint tr_out_thr_begin = (ltid * tr_out_chunksize < tr_out_work) ? (ltid * tr_out_chunksize) : tr_out_work;
    const libxsmm_blasint tr_out_thr_end = ((ltid + 1) * tr_out_chunksize < tr_out_work) ? ((ltid + 1) * tr_out_chunksize) : tr_out_work;

    /* work partition for the input transpose */
    const libxsmm_blasint tr_inp_work = nBlocksMB * nBlocksIFm;
    const libxsmm_blasint tr_inp_chunksize = (tr_inp_work % cfg.threads == 0) ? (tr_inp_work / cfg.threads) : ((tr_inp_work / cfg.threads) + 1);
    const libxsmm_blasint tr_inp_thr_begin = (ltid * tr_inp_chunksize < tr_inp_work) ? (ltid * tr_inp_chunksize) : tr_inp_work;
    const libxsmm_blasint tr_inp_thr_end = ((ltid + 1) * tr_inp_chunksize < tr_inp_work) ? ((ltid + 1) * tr_inp_chunksize) : tr_inp_work;

    if (use_2d_blocking == 1) {
      /* 2D thread decomposition over the (IFM x OFM) filter-block grid */
      int _ltid, M_hyperpartition_id, N_hyperpartition_id, _nBlocksOFm, _nBlocksIFm, hyperteam_id;
      col_teams = cfg.upd_col_teams;
      row_teams = cfg.upd_row_teams;
      hyperteam_id = ltid/(col_teams*row_teams);
      _nBlocksOFm = nBlocksOFm/cfg.upd_M_hyperpartitions;
      _nBlocksIFm = nBlocksIFm/cfg.upd_N_hyperpartitions;
      _ltid = ltid % (col_teams * row_teams);
      M_hyperpartition_id = hyperteam_id % cfg.upd_M_hyperpartitions;
      N_hyperpartition_id = hyperteam_id / cfg.upd_M_hyperpartitions;
      my_row_id = _ltid % row_teams;
      my_col_id = _ltid / row_teams;
      N_tasks_per_thread = (_nBlocksIFm + col_teams-1)/col_teams;
      M_tasks_per_thread = (_nBlocksOFm + row_teams-1)/row_teams;
      my_N_start = N_hyperpartition_id * _nBlocksIFm + LIBXSMM_MIN( my_col_id * N_tasks_per_thread, _nBlocksIFm);
      my_N_end = N_hyperpartition_id * _nBlocksIFm + LIBXSMM_MIN( (my_col_id+1) * N_tasks_per_thread, _nBlocksIFm);
      my_M_start = M_hyperpartition_id * _nBlocksOFm + LIBXSMM_MIN( my_row_id * M_tasks_per_thread, _nBlocksOFm);
      my_M_end = M_hyperpartition_id * _nBlocksOFm + LIBXSMM_MIN( (my_row_id+1) * M_tasks_per_thread, _nBlocksOFm);
    }

    /* Required upfront transposes */
    for (mb1ifm1 = tr_inp_thr_begin; mb1ifm1 < tr_inp_thr_end; mb1ifm1++) {
      mb1 = mb1ifm1%nBlocksMB;
      ifm1 = mb1ifm1/nBlocksMB;
      trans_param.in.primary = (void*)&LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
      trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, mb1, 0, 0, nBlocksMB, bc, bn);
      cfg.norm_to_normT_kernel(&trans_param);
    }

    if (performed_doutput_transpose == 0) {
      for (mb1ofm1 = tr_out_thr_begin; mb1ofm1 < tr_out_thr_end; mb1ofm1++) {
        mb1 = mb1ofm1%nBlocksMB;
        ofm1 = mb1ofm1/nBlocksMB;
        trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk);
        trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, mb1, 0, 0, 0, nBlocksMB, bn_lp, bk, lpb);
        cfg.norm_to_vnni_kernel(&trans_param);
      }
    }
    libxsmm_barrier_wait(cfg.barrier, ltid);

    if (use_2d_blocking == 1) {
      /* no sub-tiling in the 2D case (ifm2/ofm2 stay 0) */
      ifm2 = 0;
      ofm2 = 0;
      if (BF == 1) {
        for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
          for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) {
            /* single-shot BRGEMM into _tmp, then VNNI-reformat into dfilter */
            cfg.gemm_upd3(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, 0, 0, ofm2*bbk, 0, nBlocksMB, bn_lp, bk, lpb),
                          &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, 0, ifm2*bbc, 0, nBlocksMB, bc, bn), _tmp, &blocks);
            trans_param.in.primary = _tmp;
            trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb);
            cfg.upd_norm_to_vnni_kernel(&trans_param);
          }
        }
      } else {
        for (bfn = 0; bfn < BF; bfn++) {
          for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
            for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) {
              /* initialize current work task to zero */
              if (bfn == 0) {
                copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk);
                cfg.upd_zero_kernel(&copy_params);
              }
              cfg.gemm_upd(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, bfn*blocks, 0, ofm2*bbk, 0, nBlocksMB, bn_lp, bk, lpb),
                           &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, bfn*blocks, ifm2*bbc, 0, nBlocksMB, bc, bn),
                           &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk), &blocks);
              /* Downconvert result to BF16 and vnni format */
              if (bfn == BF-1) {
                LIBXSMM_ALIGNED(libxsmm_bfloat16 tmp_buf[bc][bk], 64);
                eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk);
                eltwise_params.out.primary = tmp_buf;
                trans_param.in.primary = tmp_buf;
                trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb);
                eltwise_kernel2(&eltwise_params);
                cfg.norm_to_vnni_kernel_wt(&trans_param);
              }
            }
          }
        }
      }
    } else {
      /* 1D partition: decode ofm1/ofm2/ifm1/ifm2 sub-tile indices from the flat id */
      if (BF == 1) {
        for ( ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1 ) {
          ofm1 = ifm1ofm1 / Cck_work;
          ofm2 = (ifm1ofm1 % Cck_work) / Cc_work;
          ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks;
          ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks;
          cfg.gemm_upd3(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, 0, 0, ofm2*bbk, 0, nBlocksMB, bn_lp, bk, lpb),
                        &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, 0, ifm2*bbc, 0, nBlocksMB, bc, bn), _tmp, &blocks);
          trans_param.in.primary = _tmp;
          trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, (ifm2*bbc)/lpb, ofm2*bbk, 0, nBlocksIFm, bc_lp, bk, lpb);
          cfg.upd_norm_to_vnni_kernel(&trans_param);
        }
      } else {
        for (bfn = 0; bfn < BF; bfn++) {
          for ( ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1 ) {
            ofm1 = ifm1ofm1 / Cck_work;
            ofm2 = (ifm1ofm1 % Cck_work) / Cc_work;
            ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks;
            ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks;
            /* initialize current work task to zero */
            if (bfn == 0) {
              copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk);
              cfg.upd_zero_kernel(&copy_params);
            }
            cfg.gemm_upd(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, bfn*blocks, 0, ofm2*bbk, 0, nBlocksMB, bn_lp, bk, lpb),
                         &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, bfn*blocks, ifm2*bbc, 0, nBlocksMB, bc, bn),
                         &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk), &blocks);
            /* Downconvert result to BF16 and vnni format */
            if (bfn == BF-1) {
              LIBXSMM_ALIGNED(libxsmm_bfloat16 tmp_buf[bc][bk], 64);
              eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk);
              eltwise_params.out.primary = tmp_buf;
              trans_param.in.primary = tmp_buf;
              trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, (ifm2*bbc)/lpb, ofm2*bbk, 0, nBlocksIFm, bc_lp, bk, lpb);
              eltwise_kernel2(&eltwise_params);
              cfg.norm_to_vnni_kernel_wt(&trans_param);
            }
          }
        }
      }
    }
    libxsmm_barrier_wait(cfg.barrier, ltid);
  }
}

/* Optimizer step over the C*K weights, work-split across threads.
   NOTE(review): this function continues past the end of this chunk; the tokens
   below are preserved verbatim up to the chunk boundary. Presumably it applies
   wt/master_wt -= lr * delwt -- confirm against the continuation. */
void my_opt_exec( my_opt_config cfg, libxsmm_bfloat16* wt_ptr, float* master_wt_ptr, const libxsmm_bfloat16* delwt_ptr, int start_tid, int my_tid, void* scratch ) {
  /* loop counters */
  libxsmm_blasint i;

  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;

  /* number of tasks that could run in parallel for the filters */
  const libxsmm_blasint work = cfg.C * cfg.K;
  /* compute chunk size */
  const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
  const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ?
((ltid + 1) * chunksize) : work; /* lazy barrier init */ libxsmm_barrier_init( cfg.barrier, ltid ); #if 0 /*defined(__AVX512BW__)*/ libxsmm_blasint iv = ( (thr_end-thr_begin)/16 ) * 16; /* compute iterations which are vectorizable */ __m512 vlr = _mm512_set1_ps( cfg.lr ); for ( i = thr_begin; i < thr_begin+iv; i+=16 ) { __m512 newfilter = _mm512_sub_ps( _mm512_loadu_ps( master_wt_ptr+i ), _mm512_mul_ps( vlr, _mm512_load_fil( delwt_ptr + i ) ) ); _mm512_store_fil( wt_ptr+i, newfilter ); _mm512_storeu_ps( master_wt_ptr+i, newfilter ); } for ( i = thr_begin+iv; i < thr_end; ++i ) { libxsmm_bfloat16_hp t1, t2; t1.i[0] =0; t1.i[1] = delwt_ptr[i]; master_wt_ptr[i] = master_wt_ptr[i] - (cfg.lr*t1.f); t2.f = master_wt_ptr[i]; wt_ptr[i] = t2.i[1]; } #else for ( i = thr_begin; i < thr_end; ++i ) { libxsmm_bfloat16_hp t1, t2; t1.i[0] =0; t1.i[1] = delwt_ptr[i]; master_wt_ptr[i] = master_wt_ptr[i] - (cfg.lr*t1.f); t2.f = master_wt_ptr[i]; wt_ptr[i] = t2.i[1]; } #endif libxsmm_barrier_wait( cfg.barrier, ltid ); } void my_smax_fwd_exec( my_smax_fwd_config cfg, const libxsmm_bfloat16* in_act_ptr, libxsmm_bfloat16* out_act_ptr, const int* label_ptr, float* loss, int start_tid, int my_tid, void* scratch ) { libxsmm_blasint bn = cfg.bn; libxsmm_blasint Bn = cfg.N/cfg.bn; libxsmm_blasint bc = cfg.bc; libxsmm_blasint Bc = cfg.C/cfg.bc; /* loop counters */ libxsmm_blasint i = 0; libxsmm_blasint img1, img2, ifm1, ifm2; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint n_work = Bn * bn; /* compute chunk size */ const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work; const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? 
((ltid + 1) * n_chunksize) : n_work; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint nc_work = Bn * bn * Bc * bc; /* compute chunk size */ const libxsmm_blasint nc_chunksize = (nc_work % cfg.threads == 0) ? (nc_work / cfg.threads) : ((nc_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint nc_thr_begin = (ltid * nc_chunksize < nc_work) ? (ltid * nc_chunksize) : nc_work; const libxsmm_blasint nc_thr_end = ((ltid + 1) * nc_chunksize < nc_work) ? ((ltid + 1) * nc_chunksize) : nc_work; libxsmm_bfloat16* poutput_bf16 = out_act_ptr; const libxsmm_bfloat16* pinput_bf16 = in_act_ptr; float* poutput_fp32 = (float*)scratch; float* pinput_fp32 = ((float*)scratch)+(cfg.N*cfg.C); LIBXSMM_VLA_DECL(4, float, output, poutput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(4, const float, input, pinput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn); /* lazy barrier init */ libxsmm_barrier_init( cfg.barrier, ltid ); for ( i = nc_thr_begin; i < nc_thr_end; ++i ) { libxsmm_bfloat16_hp in; in.i[0] = 0; in.i[1] = pinput_bf16[i]; pinput_fp32[i] = in.f; } libxsmm_barrier_wait( cfg.barrier, ltid ); for ( i = n_thr_begin; i < n_thr_end; ++i ) { float max = FLT_MIN; float sum_of_exp = 0.0f; img1 = i/bn; img2 = i%bn; /* set output to input and set compute max per image */ for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc ); if ( LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc ) > max ) { max = LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc ); } } } /* sum exp over outputs */ for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = (float)exp( (double)(LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) - max) ); 
sum_of_exp += LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ); } } /* scale output */ sum_of_exp = 1.0f/sum_of_exp; for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) * sum_of_exp; } } } libxsmm_barrier_wait( cfg.barrier, ltid ); /* calculate loss single threaded */ if ( ltid == 0 ) { (*loss) = 0.0f; for ( img1 = 0; img1 < Bn; ++img1 ) { for ( img2 = 0; img2 <bn; ++img2 ) { libxsmm_blasint ifm = (libxsmm_blasint)LIBXSMM_VLA_ACCESS( 2, label, img1, img2, bn ); libxsmm_blasint ifm1b = ifm/bc; libxsmm_blasint ifm2b = ifm%bc; float val = ( LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc ) > FLT_MIN ) ? LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc ) : FLT_MIN; *loss += LIBXSMM_LOGF( val ); } } *loss = ((-1.0f)*(*loss))/cfg.N; } libxsmm_barrier_wait( cfg.barrier, ltid ); for ( i = nc_thr_begin; i < nc_thr_end; ++i ) { libxsmm_bfloat16_hp in; in.f = poutput_fp32[i]; poutput_bf16[i] = in.i[1]; } libxsmm_barrier_wait( cfg.barrier, ltid ); } void my_smax_bwd_exec( my_smax_bwd_config cfg, libxsmm_bfloat16* delin_act_ptr, const libxsmm_bfloat16* out_act_ptr, const int* label_ptr, int start_tid, int my_tid, void* scratch ) { libxsmm_blasint bn = cfg.bn; libxsmm_blasint Bn = cfg.N/cfg.bn; libxsmm_blasint bc = cfg.bc; libxsmm_blasint Bc = cfg.C/cfg.bc; /* loop counters */ libxsmm_blasint i = 0; libxsmm_blasint img1, img2, ifm1, ifm2; float rcp_N = 1.0f/cfg.N; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint n_work = Bn * bn; /* compute chunk size */ const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? 
(n_work / cfg.threads) : ((n_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work; const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? ((ltid + 1) * n_chunksize) : n_work; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint nc_work = Bn * bn * Bc * bc; /* compute chunk size */ const int nc_chunksize = (nc_work % cfg.threads == 0) ? (nc_work / cfg.threads) : ((nc_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const int nc_thr_begin = (ltid * nc_chunksize < nc_work) ? (ltid * nc_chunksize) : nc_work; const int nc_thr_end = ((ltid + 1) * nc_chunksize < nc_work) ? ((ltid + 1) * nc_chunksize) : nc_work; const libxsmm_bfloat16* poutput_bf16 = out_act_ptr; libxsmm_bfloat16* pdinput_bf16 = delin_act_ptr; float* poutput_fp32 = (float*)scratch; float* pdinput_fp32 = ((float*)scratch)+(cfg.N*cfg.C); LIBXSMM_VLA_DECL(4, const float, output, poutput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(4, float, dinput, pdinput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn); /* lazy barrier init */ libxsmm_barrier_init( cfg.barrier, ltid ); for ( i = nc_thr_begin; i < nc_thr_end; ++i ) { libxsmm_bfloat16_hp out; out.i[0] = 0; out.i[1] = poutput_bf16[i]; poutput_fp32[i] = out.f; } libxsmm_barrier_wait( cfg.barrier, ltid ); for ( i = n_thr_begin; i < n_thr_end; ++i ) { img1 = i/bn; img2 = i%bn; /* set output to input and set compute max per image */ for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { if ( (ifm1*Bc)+ifm2 == (libxsmm_blasint)LIBXSMM_VLA_ACCESS( 2, label, img1, img2, bn ) ) { LIBXSMM_VLA_ACCESS( 4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc ) = ( LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) - 1.0f ) * rcp_N * cfg.loss_weight; } else { LIBXSMM_VLA_ACCESS( 4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, output, img1, 
ifm1, img2, ifm2, Bc, bn, bc ) * rcp_N * cfg.loss_weight; } } } } libxsmm_barrier_wait( cfg.barrier, ltid ); for ( i = nc_thr_begin; i < nc_thr_end; ++i ) { libxsmm_bfloat16_hp in; in.f = pdinput_fp32[i]; pdinput_bf16[i] = in.i[1]; } libxsmm_barrier_wait( cfg.barrier, ltid ); } void init_master_weights( my_opt_config cfg, float* master_wt_ptr, size_t size) { #if 0 if (0/* && cfg.upd_N_hyperpartitions != 1 */) { /* TODO: add hyperpartitions (?) */ /* Spread out weights in a blocked fasion since we partition the MODEL dimenstion */ init_buffer_block_numa((libxsmm_bfloat16*) master_wt_ptr, size/2); } else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa(master_wt_ptr, size); } #endif } void init_weights( my_fc_fwd_config cfg, libxsmm_bfloat16* wt_ptr, size_t size) { if (cfg.fwd_M_hyperpartitions != 1) { /* Spread out weights in a blocked fasion since we partition the MODEL dimenstion */ init_buffer_block_numa(wt_ptr, size); } else { /* Init weights in a block fashion */ init_buffer_block_cyclic_numa(wt_ptr, size); } } void init_dweights( my_fc_bwd_config cfg, libxsmm_bfloat16* dwt_ptr, size_t size) { if (cfg.upd_N_hyperpartitions != 1) { /* Spread out weights */ init_buffer_block_numa(dwt_ptr, size); } else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa(dwt_ptr, size); } } void init_acts( my_fc_fwd_config cfg, libxsmm_bfloat16* act_ptr, size_t size) { if (cfg.fwd_N_hyperpartitions != 1) { /* Spread out weights */ init_buffer_block_numa(act_ptr, size); } else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa(act_ptr, size); } } void init_delacts( my_fc_bwd_config cfg, libxsmm_bfloat16* delact_ptr, size_t size) { if (cfg.bwd_N_hyperpartitions != 1) { /* Spread out weights */ init_buffer_block_numa(delact_ptr, size); } else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa(delact_ptr, size); } } int main(int argc, char* argv[]) { libxsmm_bfloat16 **act_libxsmm, 
**fil_libxsmm, **delact_libxsmm, **delfil_libxsmm; libxsmm_bfloat16 **bias_libxsmm, **delbias_libxsmm; float **fil_master; unsigned char **relumask_libxsmm; int *label_libxsmm; my_eltwise_fuse my_fuse; my_fc_fwd_config* my_fc_fwd; my_fc_bwd_config* my_fc_bwd; my_opt_config* my_opt; my_smax_fwd_config my_smax_fwd; my_smax_bwd_config my_smax_bwd; void* scratch = NULL; size_t scratch_size = 0; /* some parameters we can overwrite via cli, default is some inner layer of overfeat */ int iters = 10; /* repetitions of benchmark */ int MB = 32; /* mini-batch size, "N" */ int fuse_type = 0; /* 0: nothing fused, 1: relu fused, 2: elementwise fused, 3: relu and elementwise fused */ char type = 'A'; /* 'A': ALL, 'F': FP, 'B': BP */ int bn = 64; int bk = 64; int bc = 64; int *C; /* number of input feature maps, "C" */ int num_layers = 0; const char *const env_check = getenv("CHECK"); const double check = LIBXSMM_ABS(0 == env_check ? 1 : atof(env_check)); #if defined(_OPENMP) int nThreads = omp_get_max_threads(); /* number of threads */ #else int nThreads = 1; /* number of threads */ #endif unsigned long long l_start, l_end; double l_total = 0.0; double gflop = 0.0; int i, j; double act_size = 0.0; double fil_size = 0.0; float lr = 0.1f; float loss_weight = 1.0f; float loss = 0.0; libxsmm_matdiff_info norms_fwd, norms_bwd, norms_upd, diff; libxsmm_matdiff_clear(&norms_fwd); libxsmm_matdiff_clear(&norms_bwd); libxsmm_matdiff_clear(&norms_upd); libxsmm_matdiff_clear(&diff); char* env_threads_per_numa; if (argc > 1 && !strncmp(argv[1], "-h", 3)) { printf("Usage: %s iters MB bn bk bc C1 C2 ... 
CN\n", argv[0]); return 0; } libxsmm_rng_set_seed(1); /* reading new values from cli */ i = 1; num_layers = argc - 7; if (argc > i) iters = atoi(argv[i++]); if (argc > i) MB = atoi(argv[i++]); if (argc > i) bn = atoi(argv[i++]); if (argc > i) bk = atoi(argv[i++]); if (argc > i) bc = atoi(argv[i++]); /* allocate the number of channles buffer */ if ( num_layers < 1 ) { printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... CN\n", argv[0]); return 0; } C = (int*)malloc((num_layers+2)*sizeof(int)); for (j = 0 ; i < argc; ++i, ++j ) { C[j] = atoi(argv[i]); } /* handle softmax config */ C[num_layers+1] = C[num_layers]; #if defined(__SSE3__) _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST); #endif /* Read env variables */ env_threads_per_numa = getenv("THREADS_PER_NUMA"); if ( 0 == env_threads_per_numa ) { printf("please specify THREADS_PER_NUMA to a non-zero value!\n"); return -1; } else { threads_per_numa = atoi(env_threads_per_numa); } /* print some summary */ printf("##########################################\n"); printf("# Setting Up (Common) #\n"); printf("##########################################\n"); printf("PARAMS: N:%d\n", MB); printf("PARAMS: Layers: %d\n", num_layers); printf("PARAMS: ITERS:%d", iters); if (LIBXSMM_FEQ(0, check)) printf(" Threads:%d\n", nThreads); else printf("\n"); for (i = 0; i < num_layers; ++i ) { if (i == 0) { act_size += (double)(MB*C[i]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0); printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i, MB, C[i], (double)(MB*C[i]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) ); } act_size += (double)(MB*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0); fil_size += (double)(C[i]*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0); printf("SIZE Filter %i (%dx%d): %10.2f MiB\n", i, C[i], C[i+1], (double)(C[i]*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) ); printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i+1, MB, 
C[i+1], (double)(MB*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) ); } act_size += (double)(MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0); printf("SIZE Activations softmax (%dx%d): %10.2f MiB\n", MB, C[num_layers+1], (double)(MB*C[num_layers+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) ); printf("\nTOTAL SIZE Activations: %10.2f MiB\n", act_size ); printf("TOTAL SIZE Filter (incl. master): %10.2f MiB\n", 3.0*fil_size ); printf("TOTAL SIZE delActivations: %10.2f MiB\n", act_size ); printf("TOTAL SIZE delFilter: %10.2f MiB\n", fil_size ); printf("TOTAL SIZE MLP: %10.2f MiB\n", (4.0*fil_size) + (2.0*act_size) ); /* allocate data */ act_libxsmm = (libxsmm_bfloat16**)malloc( (num_layers+2)*sizeof(libxsmm_bfloat16*) ); delact_libxsmm = (libxsmm_bfloat16**)malloc( (num_layers+1)*sizeof(libxsmm_bfloat16*) ); for ( i = 0 ; i < num_layers+2; ++i ) { act_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( MB*C[i]*sizeof(libxsmm_bfloat16), 2097152); /* softmax has no incoming gradients */ if ( i < num_layers+1 ) { delact_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( MB*C[i]*sizeof(libxsmm_bfloat16), 2097152); } } fil_master = (float**) malloc( num_layers*sizeof(float*) ); fil_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) ); delfil_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) ); for ( i = 0 ; i < num_layers; ++i ) { fil_master[i] = (float*) libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152); fil_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(libxsmm_bfloat16), 2097152); delfil_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(libxsmm_bfloat16), 2097152); } bias_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) ); delbias_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) ); for ( i = 0 ; i < num_layers; ++i ) { bias_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( 
C[i+1]*sizeof(libxsmm_bfloat16), 2097152); delbias_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i+1]*sizeof(libxsmm_bfloat16), 2097152); } relumask_libxsmm = (unsigned char**)malloc( num_layers*sizeof(unsigned char*) ); for ( i = 0 ; i < num_layers; ++i ) { relumask_libxsmm[i] = (unsigned char*)libxsmm_aligned_malloc( MB*C[i+1]*sizeof(unsigned char), 2097152); } label_libxsmm = (int*)libxsmm_aligned_malloc( MB*sizeof(int), 2097152); printf("\n"); printf("##########################################\n"); printf("# Setting Up (custom-Storage) #\n"); printf("##########################################\n"); /* allocating handles */ my_fc_fwd = (my_fc_fwd_config*) malloc( num_layers*sizeof(my_fc_fwd_config) ); my_fc_bwd = (my_fc_bwd_config*) malloc( num_layers*sizeof(my_fc_bwd_config) ); my_opt = (my_opt_config*) malloc( num_layers*sizeof(my_opt_config) ); /* setting up handles + scratch */ size_t max_bwd_scratch_size = 0, max_doutput_scratch_mark = 0; scratch_size = 0; for ( i = 0; i < num_layers; ++i ) { /* MNIST Specific where everywhere we use relu act except the last layer */ if ( i < num_layers -1 ) { my_fuse = MY_ELTWISE_FUSE_RELU; } else { my_fuse = MY_ELTWISE_FUSE_NONE; } my_fc_fwd[i] = setup_my_fc_fwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB, (C[i ] % bc == 0) ? bc : C[i ], (C[i+1] % bk == 0) ? bk : C[i+1], nThreads, my_fuse); my_fc_bwd[i] = setup_my_fc_bwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB, (C[i ] % bc == 0) ? bc : C[i ], (C[i+1] % bk == 0) ? bk : C[i+1], nThreads, my_fuse); my_opt[i] = setup_my_opt( C[i], C[i+1], (C[i ] % bc == 0) ? bc : C[i ], (C[i+1] % bk == 0) ? 
bk : C[i+1], nThreads, lr ); if (my_fc_bwd[i].scratch_size > 0 && my_fc_bwd[i].scratch_size > max_bwd_scratch_size) { max_bwd_scratch_size = my_fc_bwd[i].scratch_size; } if (my_fc_bwd[i].doutput_scratch_mark > 0 && my_fc_bwd[i].doutput_scratch_mark > max_doutput_scratch_mark) { max_doutput_scratch_mark = my_fc_bwd[i].doutput_scratch_mark; } /* let's allocate and bind scratch */ if ( my_fc_fwd[i].scratch_size > 0 || my_fc_bwd[i].scratch_size > 0 || my_opt[i].scratch_size > 0 ) { size_t alloc_size = LIBXSMM_MAX( LIBXSMM_MAX( my_fc_fwd[i].scratch_size, my_fc_bwd[i].scratch_size), my_opt[i].scratch_size ); if ( alloc_size > scratch_size ) { scratch_size = alloc_size; } } } /* softmax+loss is treated as N+1 layer */ my_smax_fwd = setup_my_smax_fwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB, (C[num_layers+1] % bk == 0) ? bk : C[num_layers+1], nThreads ); my_smax_bwd = setup_my_smax_bwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB, (C[num_layers+1] % bk == 0) ? bk : C[num_layers+1], nThreads, loss_weight ); if ( my_smax_fwd.scratch_size > 0 || my_smax_bwd.scratch_size > 0 ) { size_t alloc_size = LIBXSMM_MAX( my_smax_fwd.scratch_size, my_smax_bwd.scratch_size ); if ( alloc_size > scratch_size ) { scratch_size = alloc_size; } } scratch = libxsmm_aligned_scratch( scratch_size, 2097152 ); /* init data */ for ( i = 0 ; i < num_layers+2; ++i ) { init_acts(my_fc_fwd[i], act_libxsmm[i], MB*C[i]); } for ( i = 0 ; i < num_layers+1; ++i ) { init_delacts(my_fc_bwd[i], delact_libxsmm[i], MB*C[i]); } for ( i = 0 ; i < num_layers; ++i ) { /*init_master_weights(my_opt[i], fil_master[i], C[i]*C[i+1] );*/ my_init_buf( fil_master[i], C[i]*C[i+1], 0, 0 ); libxsmm_rne_convert_fp32_bf16( fil_master[i], fil_libxsmm[i], C[i]*C[i+1] ); /*init_weights(my_fc_fwd[i], fil_libxsmm[i], C[i]*C[i+1]);*/ init_dweights(my_fc_bwd[i], delfil_libxsmm[i], C[i]*C[i+1]); } for ( i = 0 ; i < num_layers; ++i ) { my_init_buf_bf16( bias_libxsmm[i], C[i+1], 0, 0 ); } for ( i = 0 ; i < num_layers; ++i ) { 
my_init_buf_bf16( delbias_libxsmm[i], C[i+1], 0, 0 ); } zero_buf_int32( label_libxsmm, MB ); /* Reading in the MNIST dataset */ int n_batches = NUM_TRAIN/MB, batch_id = 0; int n_epochs = iters, epoch_id = 0; libxsmm_bfloat16 *input_acts = (libxsmm_bfloat16*)libxsmm_aligned_malloc( NUM_TRAIN * C[0] * sizeof(libxsmm_bfloat16), 2097152); /* Read in input data */ char *train_image_path = "../mlpdriver/mnist_data/train-images.idx3-ubyte"; char *train_label_path = "../mlpdriver/mnist_data/train-labels.idx1-ubyte"; char *test_image_path = "../mlpdriver/mnist_data/t10k-images.idx3-ubyte"; char *test_label_path = "../mlpdriver/mnist_data/t10k-labels.idx1-ubyte"; load_mnist(train_image_path, train_label_path, test_image_path, test_label_path); /* Format the input layer in NCNC blocked format */ int _i, _j; for (_i = 0; _i < n_batches*MB; _i++) { for (_j = 0; _j < C[0]; _j++) { float val = (_j < 784) ? (float) train_image[_i][_j] : (float)0.0; int batchid = _i/MB; int mb = _i % MB; int _bn = (MB % bn == 0) ? bn : MB; int _bc = (C[0] % bc == 0) ? bc : C[0]; libxsmm_bfloat16 *cur_pos = input_acts + batchid * MB *C[0] + (mb / _bn) * C[0] * _bn + (_j / _bc) * _bn * _bc + (mb % _bn) * _bc + (_j % _bc); libxsmm_rne_convert_fp32_bf16( &val, cur_pos, 1 ); } } printf("###########################################\n"); printf("# Training MNIST with %d training samples #\n", n_batches*MB); printf("###########################################\n"); l_start = libxsmm_timer_tick(); #if defined(_OPENMP) # pragma omp parallel private(i,j,epoch_id,batch_id) #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif for (epoch_id = 0; epoch_id < n_epochs; epoch_id++) { for (batch_id = 0; batch_id < n_batches; batch_id++) { for ( i = 0; i < num_layers; ++i) { libxsmm_bfloat16 *input_act_ptr = (i == 0) ? 
input_acts + batch_id * MB * C[0] : act_libxsmm[i]; my_fc_fwd_exec( my_fc_fwd[i], fil_libxsmm[i], input_act_ptr, act_libxsmm[i+1], bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch ); } my_smax_fwd_exec( my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], train_label + batch_id * MB, &loss, 0, tid, scratch ); if ((tid == 0) && (batch_id == 0) && (epoch_id % 10 == 0 || epoch_id == n_epochs - 1 )) { printf("Loss for epoch %d batch_id %d is %f\n", epoch_id, batch_id, loss); } my_smax_bwd_exec( my_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], train_label + batch_id * MB, 0, tid, scratch ); for ( i = num_layers-1; i > 0; --i) { my_fc_bwd_exec( my_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i], act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i], MY_PASS_BWD, 0, tid, scratch ); my_opt_exec( my_opt[i], fil_libxsmm[i], fil_master[i], delfil_libxsmm[i], 0, tid, scratch ); } my_fc_bwd_exec( my_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0], input_acts + batch_id * MB * C[0], delbias_libxsmm[0], relumask_libxsmm[0], MY_PASS_BWD_W, 0, tid, scratch ); my_opt_exec( my_opt[0], fil_libxsmm[0], fil_master[0], delfil_libxsmm[0], 0, tid, scratch ); } } } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); gflop = 0.0; for ( i = num_layers-1; i > 0; --i) { gflop += (6.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)((double)n_epochs *(double)n_batches)) / (1000.0*1000.0*1000.0); } gflop += (4.0*(double)MB*(double)C[0]*(double)C[1]*(double)((double)n_epochs *(double)n_batches)) / (1000.0*1000.0*1000.0); printf("GFLOP = %.5g\n", gflop/(double)((double)n_epochs *(double)n_batches)); printf("fp time = %.5g\n", ((double)(l_total/((double)n_epochs *(double)n_batches)))); printf("GFLOPS = %.5g\n", gflop/l_total); printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB ); for ( i = 0; i < num_layers; ++i ) { printf("%i,", C[i] ); } 
printf("%f,%f\n", ((double)(l_total/((double)n_epochs *(double)n_batches))), gflop/l_total); #ifdef TEST_ACCURACY /* Test accuracy */ n_batches = NUM_TEST/MB; for (_i = 0; _i < n_batches * MB; _i++) { for (_j = 0; _j < C[0]; _j++) { float val = (_j < 784) ? (float) test_image[_i][_j] : 0.0; int batchid = _i/MB; int mb = _i % MB; int _bn = (MB % bn == 0) ? bn : MB; int _bc = (C[0] % bc == 0) ? bc : C[0]; libxsmm_bfloat16 *cur_pos = input_acts + batchid * MB *C[0] + (mb / _bn) * C[0] * _bn + (_j / _bc) * _bn * _bc + (mb % _bn) * _bc + (_j % _bc); libxsmm_rne_convert_fp32_bf16( &val, cur_pos, 1 ); } } n_batches = NUM_TEST/MB; unsigned int hits = 0; unsigned int samples = 0; #if defined(_OPENMP) # pragma omp parallel private(i,j,batch_id) #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif for (batch_id = 0; batch_id < n_batches; batch_id++) { for ( i = 0; i < num_layers; ++i) { libxsmm_bfloat16 *input_act_ptr = (i == 0) ? input_acts + batch_id * MB * C[0] : act_libxsmm[i]; my_fc_fwd_exec( my_fc_fwd[i], fil_libxsmm[i], input_act_ptr, act_libxsmm[i+1], bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch ); } my_smax_fwd_exec( my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], test_label + batch_id * MB, &loss, 0, tid, scratch ); if (tid == 0) { for (_i = 0; _i < MB; _i++) { int label = *(test_label + batch_id * MB + _i); int max_id = 0; float max_val = 0.0; libxsmm_convert_bf16_f32( act_libxsmm[num_layers+1] + _i * 10, &max_val, 1 ); /* Find predicted label */ for (_j = 1; _j < 10; _j++) { libxsmm_bfloat16 val = *(act_libxsmm[num_layers+1] + _i * 10 + _j); float f32_val; libxsmm_convert_bf16_f32( &val, &f32_val, 1 ); if (f32_val > max_val) { max_id = _j; max_val = f32_val; } } /* Compare with true label */ if (max_id == label) { hits++; } samples++; } } #pragma omp barrier } } printf("Accuracy is %f %% (%d test samples)\n", (1.0*hits)/(1.0*samples)*100.0, samples); #endif /* deallocate data */ if ( scratch 
!= NULL ) { libxsmm_free(scratch); } for ( i = 0; i < num_layers; ++i ) { if ( i == 0 ) { libxsmm_free(act_libxsmm[i]); libxsmm_free(delact_libxsmm[i]); } libxsmm_free(act_libxsmm[i+1]); libxsmm_free(delact_libxsmm[i+1]); libxsmm_free(fil_libxsmm[i]); libxsmm_free(delfil_libxsmm[i]); libxsmm_free(bias_libxsmm[i]); libxsmm_free(delbias_libxsmm[i]); libxsmm_free(relumask_libxsmm[i]); libxsmm_free(fil_master[i]); } libxsmm_free(act_libxsmm[num_layers+1]); libxsmm_free(label_libxsmm); libxsmm_free(input_acts); free( my_opt ); free( my_fc_fwd ); free( my_fc_bwd ); free( act_libxsmm ); free( delact_libxsmm ); free( fil_master ); free( fil_libxsmm ); free( delfil_libxsmm ); free( bias_libxsmm ); free( delbias_libxsmm ); free( relumask_libxsmm ); free( C ); /* some empty lines at the end */ printf("\n\n\n"); return 0; }
/* ==== jacobi_avx2.c ==== */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>     /* memcpy (was missing: memcpy was implicitly declared) */
#include <math.h>       /* sqrt (was missing: sqrt was implicitly declared, which
                           is invalid in C99+ and can silently corrupt the result
                           by returning through the int convention) */
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define REAL float

/* Wall-clock timestamp in milliseconds, based on the (obsolescent) ftime(). */
static double read_timer_ms() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time * 1000.0 + (double) tm.millitm;
}

/************************************************************
 * program to solve a finite difference
 * discretization of Helmholtz equation :
 * (d2/dx2)u + (d2/dy2)u - alpha u = f
 * using Jacobi iterative method.
 *
 * Modified: Sanjiv Shah,       Kuck and Associates, Inc. (KAI), 1998
 * Author:   Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
 *
 * This c version program is translated by
 * Chunhua Liao, University of Houston, Jan, 2005
 *
 * Directives are used in this code to achieve parallelism.
 * All do loops are parallelized with default 'static' scheduling.
 *
 * Input :  n - grid dimension in x direction
 *          m - grid dimension in y direction
 *          alpha - Helmholtz constant (always greater than 0.0)
 *          tol   - error tolerance for iterative solver
 *          relax - Successice over relaxation parameter
 *          mits  - Maximum iterations for iterative solver
 *
 * On output
 *       : u(n,m) - Dependent variable (solutions)
 *       : f(n,m) - Right hand side function
 *************************************************************/

#define DEFAULT_DIMSIZE 256

/* Print an n x m row-major matrix A with a title, for debugging. */
void print_array(char *title, char *name, REAL *A, int n, int m) {
    int i, j;
    printf("%s:\n", title);
    for (i = 0; i < n; i++) {
        for (j = 0; j < m; j++) {
            printf("%s[%d][%d]:%f ", name, i, j, A[i * m + j]);
        }
        printf("\n");
    }
    printf("\n");
}

/* subroutine initialize (n,m,alpha,dx,dy,u,f)
 ******************************************************
 * Initializes data:
 *   u = 0 everywhere; f = RHS consistent with the
 *   assumed exact solution u(x,y) = (1-x^2)*(1-y^2).
 * Also returns the grid spacings *dx and *dy.
 ******************************************************/
void initialize(int n, int m, REAL alpha, REAL *dx, REAL *dy, REAL *u_p, REAL *f_p) {
    int i, j;
    int xx, yy;
    REAL (*u)[m] = (REAL (*)[m]) u_p;
    REAL (*f)[m] = (REAL (*)[m]) f_p;

    *dx = (2.0 / (n - 1));
    *dy = (2.0 / (m - 1));

    /* Initialize initial condition and RHS.
     * NOTE(review): xx and yy are truncated to int, so they only take a few
     * integer values; this matches the original benchmark source exactly, but
     * looks like a long-standing quirk -- confirm against the reference
     * Fortran/OpenMP version before "fixing" it. */
    for (i = 0; i < n; i++)
        for (j = 0; j < m; j++) {
            xx = ((int) (-1.0 + (*dx * (i - 1))));
            yy = ((int) (-1.0 + (*dy * (j - 1))));
            u[i][j] = 0.0;
            f[i][j] = (((((-1.0 * alpha) * (1.0 - (xx * xx)))
                         * (1.0 - (yy * yy)))
                        - (2.0 * (1.0 - (xx * xx))))
                       - (2.0 * (1.0 - (yy * yy))));
        }
}

/* subroutine error_check (n,m,alpha,dx,dy,u,f)
 ************************************************************
 * Checks error between numerical and exact solution
 * u_exact(x,y) = (1-x^2)*(1-y^2); prints the RMS-style error.
 ************************************************************/
void error_check(int n, int m, REAL alpha, REAL dx, REAL dy, REAL *u_p, REAL *f_p) {
    int i, j;
    REAL xx, yy, temp, error;
    REAL (*u)[m] = (REAL (*)[m]) u_p;
    (void) f_p;     /* unused; parameter kept for interface compatibility */

    error = 0.0;
    for (i = 0; i < n; i++)
        for (j = 0; j < m; j++) {
            xx = (-1.0 + (dx * (i - 1)));
            yy = (-1.0 + (dy * (j - 1)));
            temp = (u[i][j] - ((1.0 - (xx * xx)) * (1.0 - (yy * yy))));
            error = (error + (temp * temp));
        }
    error = (sqrt(error) / (n * m));
    printf("Solution Error: %2.6g\n", error);
}

void jacobi_seq(int n, int m, REAL dx, REAL dy, REAL alpha, REAL relax, REAL *u_p, REAL *f_p, REAL tol, int mits);

void jacobi_omp(int n, int m, REAL dx, REAL dy, REAL alpha, REAL relax, REAL *u_p, REAL *f_p, REAL tol, int mits);

int main(int argc, char *argv[]) {
    int n = DEFAULT_DIMSIZE;
    int m = DEFAULT_DIMSIZE;
    REAL alpha = 0.0543;
    REAL tol = 0.0000000001;
    REAL relax = 1.0;
    int mits = 5000;

    /* Usage: jacobi [<n> <m> <alpha> <tol> <relax> <mits>]
     *   n     - grid dimension in x direction (default DEFAULT_DIMSIZE)
     *   m     - grid dimension in y direction (default n if given, else DEFAULT_DIMSIZE)
     *   alpha - Helmholtz constant (> 0.0)
     *   tol   - error tolerance for iterative solver
     *   relax - successive over-relaxation parameter
     *   mits  - maximum iterations for iterative solver */
    if (argc == 2) {
        sscanf(argv[1], "%d", &n);
        m = n;
    } else if (argc >= 3 && argc <= 7) {
        /* Cumulative parsing preserves the original cascade: each extra
         * argument overrides one more default, in order. */
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
        if (argc >= 4) sscanf(argv[3], "%g", &alpha);
        if (argc >= 5) sscanf(argv[4], "%g", &tol);
        if (argc >= 6) sscanf(argv[5], "%g", &relax);
        if (argc >= 7) sscanf(argv[6], "%d", &mits);
    } else {
        /* argc == 1 or argc > 7: all (extra) arguments ignored, as before */
    }
    printf("jacobi %d %d %g %g %g %d\n", n, m, alpha, tol, relax, mits);
    printf("------------------------------------------------------------------------------------------------------\n");

    /** init the arrays: u/uomp hold the solution, f the right-hand side */
    REAL *u = (REAL *) malloc(sizeof(REAL) * n * m);
    REAL *uomp = (REAL *) malloc(sizeof(REAL) * n * m);
    REAL *f = (REAL *) malloc(sizeof(REAL) * n * m);
    if (u == NULL || uomp == NULL || f == NULL) {
        fprintf(stderr, "jacobi: out of memory\n");
        free(u);
        free(uomp);
        free(f);
        return 1;
    }
    REAL dx;        /* grid spacing in x direction */
    REAL dy;        /* grid spacing in y direction */
    initialize(n, m, alpha, &dx, &dy, u, f);
    /* Both runs start from the same initial state so results are comparable. */
    memcpy(uomp, u, sizeof(REAL) * n * m);

    double elapsed = read_timer_ms();
    jacobi_seq(n, m, dx, dy, alpha, relax, u, f, tol, mits);
    elapsed = read_timer_ms() - elapsed;
    printf("seq elasped time(ms): %4f\n", elapsed);
    /* 13 flops per interior point per iteration */
    double mflops = (0.001 * mits * (n - 2) * (m - 2) * 13) / elapsed;
    printf("MFLOPS: %12.6g\n", mflops);
    puts("================");

    elapsed = read_timer_ms();
    jacobi_omp(n, m, dx, dy, alpha, relax, uomp, f, tol, mits);
    elapsed = read_timer_ms() - elapsed;
    printf("OpenMP elasped time(ms): %4f\n", elapsed);
    mflops = (0.001 * mits * (n - 2) * (m - 2) * 13) / elapsed;
    printf("MFLOPS: %12.6g\n", mflops);

    //print_array("Sequential Run", "u",(REAL*)u, n, m);
    error_check(n, m, alpha, dx, dy, u, f);

    free(u);
    free(f);
    free(uomp);
    return 0;
}

/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,mits)
 ******************************************************************
 * Subroutine HelmholtzJ
 * Solves poisson equation on rectangular grid assuming :
 * (1) Uniform discretization in each direction, and
 * (2) Dirichlect boundary conditions
 *
 * Jacobi method is used in this routine
 *
 * Input : n,m   Number of grid points in the X/Y directions
 *         dx,dy Grid spacing in the X/Y directions
 *         alpha Helmholtz eqn. coefficient
 *         omega Relaxation factor
 *         f(n,m) Right hand side function
 *         u(n,m) Dependent variable/Solution
 *         tol    Tolerance for iterative solver
 *         mits  Maximum number of iterations
 *
 * Output : u(n,m) - Solution
 *****************************************************************/
void jacobi_seq(int n, int m, REAL dx, REAL dy, REAL alpha, REAL omega, REAL *u_p, REAL *f_p, REAL tol, int mits) {
    int i, j, k;
    REAL error, ax, ay, b, resid;
    /* was: REAL uold[n][m]; -- a VLA of n*m floats on the stack risks stack
     * overflow for realistic grid sizes, and jacobi_omp already uses the
     * heap for its scratch copy; allocate on the heap for consistency. */
    REAL *tmp = (REAL *) malloc(sizeof(REAL) * n * m);
    REAL (*uold)[m] = (REAL (*)[m]) tmp;
    REAL (*u)[m] = (REAL (*)[m]) u_p;
    REAL (*f)[m] = (REAL (*)[m]) f_p;

    /* Initialize coefficients */
    ax = (1.0 / (dx * dx));                                     /* X-direction coef */
    ay = (1.0 / (dy * dy));                                     /* Y-direction coef */
    b = (((-2.0 / (dx * dx)) - (2.0 / (dy * dy))) - alpha);     /* Central coeff */

    error = (10.0 * tol);
    k = 1;
    while ((k <= mits) && (error > tol)) {
        error = 0.0;
        /* Copy new solution into old */
        for (i = 0; i < n; i++)
            for (j = 0; j < m; j++)
                uold[i][j] = u[i][j];
        /* Jacobi sweep over interior points; boundaries stay fixed (Dirichlet) */
        for (i = 1; i < (n - 1); i++)
            for (j = 1; j < (m - 1); j++) {
                resid = (ax * (uold[i - 1][j] + uold[i + 1][j])
                         + ay * (uold[i][j - 1] + uold[i][j + 1])
                         + b * uold[i][j] - f[i][j]) / b;
                u[i][j] = uold[i][j] - omega * resid;
                error = error + resid * resid;
            }
        error = sqrt(error) / (n * m);
        k = k + 1;
    } /* End iteration loop */
    printf("Total Number of Iterations: %d\n", k);
    printf("Residual: %.15g\n", error);
    free(tmp);
}

/* OpenMP-simd variant of jacobi_seq; identical math, with simd directives on
 * the inner loops (this file's purpose is to exercise those directives). */
void jacobi_omp(int n, int m, REAL dx, REAL dy, REAL alpha, REAL omega, REAL *u_p, REAL *f_p, REAL tol, int mits) {
    int i, j, k;
    REAL error, ax, ay, b, resid;
    REAL *tmp = (REAL *) malloc(sizeof(REAL) * n * m);
    REAL (*uold)[m] = (REAL (*)[m]) tmp;
    REAL (*u)[m] = (REAL (*)[m]) u_p;
    REAL (*f)[m] = (REAL (*)[m]) f_p;

    /* Initialize coefficients */
    ax = (1.0 / (dx * dx));                                     /* X-direction coef */
    ay = (1.0 / (dy * dy));                                     /* Y-direction coef */
    b = (((-2.0 / (dx * dx)) - (2.0 / (dy * dy))) - alpha);     /* Central coeff */

    error = (10.0 * tol);
    k = 1;
    while ((k <= mits) && (error > tol)) {
        error = 0.0;
        /* Copy new solution into old.
         * NOTE(review): simdlen(10) is not a power of two and exceeds
         * safelen(8); kept verbatim since this file appears to exist to test
         * these clauses -- confirm intent before changing. */
        for (i = 0; i < n; i++)
#pragma omp simd simdlen(10) safelen(8)
            for (j = 0; j < m; j++)
                uold[i][j] = u[i][j];
        /* NOTE(review): resid is overwritten, not accumulated; listing it in
         * reduction(+:...) effectively privatizes it -- kept verbatim. */
        for (i = 1; i < (n - 1); i++)
#pragma omp simd reduction(+:resid,error) simdlen(8)
            for (j = 1; j < (m - 1); j++) {
                resid = (ax * (uold[i - 1][j] + uold[i + 1][j])
                         + ay * (uold[i][j - 1] + uold[i][j + 1])
                         + b * uold[i][j] - f[i][j]) / b;
                u[i][j] = uold[i][j] - omega * resid;
                error = error + resid * resid;
            }
        error = sqrt(error) / (n * m);
        k = k + 1;
    } /* End iteration loop */
    printf("Total Number of Iterations: %d\n", k);
    printf("Residual: %.15g\n", error);
    free(tmp);
}
pr70680-2.c
/* PR middle-end/70680 */

/* GCC regression test: after an `omp simd collapse(2)` loop nest whose
   iterators i and j are shared (f1, f2) or explicitly lastprivate (f3, f4),
   the post-loop values of i and j must be visible outside the construct,
   even when the construct sits inside an undeferred (if(0)) task region.  */

int v;

void
f1 (void)
{
  int i = 0, j = 0;
  /* if(0): the task is undeferred and executes immediately on the
     encountering thread; default(shared) makes i and j shared.  */
  #pragma omp task default(shared) if(0)
  {
    #pragma omp simd collapse(2)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
	;
    /* i and j must both be 10 here, so v becomes 20.  */
    v = i + j;
  }
  if (i != 10 || j != 10)
    __builtin_abort ();
}

void
f2 (void)
{
  int i = 0, j = 0;
  /* Same as f1, but without any use of i and j inside the task; the
     final iterator values must still propagate out.  */
  #pragma omp task default(shared) if(0)
  {
    #pragma omp simd collapse(2)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
	;
  }
  if (i != 10 || j != 10)
    __builtin_abort ();
}

void
f3 (void)
{
  int i = 0, j = 0;
  /* Same as f1, but with the iterators explicitly listed in a
     lastprivate clause on the simd construct.  */
  #pragma omp task default(shared) if(0)
  {
    #pragma omp simd collapse(2) lastprivate (i, j)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
	;
    v = i + j;
  }
  if (i != 10 || j != 10)
    __builtin_abort ();
}

void
f4 (void)
{
  int i = 0, j = 0;
  /* Same as f2, but with an explicit lastprivate clause.  */
  #pragma omp task default(shared) if(0)
  {
    #pragma omp simd collapse(2) lastprivate (i, j)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
	;
  }
  if (i != 10 || j != 10)
    __builtin_abort ();
}

int
main ()
{
  /* f1 and f3 each set v to i + j == 20; verify, then post-increment so a
     stale value would be caught on the next check.  */
  f1 ();
  if (v++ != 20)
    __builtin_abort ();
  f2 ();
  f3 ();
  if (v++ != 20)
    __builtin_abort ();
  f4 ();
  return 0;
}
GB_binop__second_int8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): generated kernel file for the SECOND operator on int8;
// the bodies below are filled in by the #include'd template files, which
// expand the GB_* macros defined here.  Keep code changes in the Generator,
// not here.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__second_int8)
// A.*B function (eWiseMult):       GB (_AemultB_08__second_int8)
// A.*B function (eWiseMult):       GB (_AemultB_02__second_int8)
// A.*B function (eWiseMult):       GB (_AemultB_04__second_int8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__second_int8)
// A*D function (colscale):         GB (_AxD__second_int8)
// D*A function (rowscale):         GB (_DxB__second_int8)
// C+=B function (dense accum):     GB (_Cdense_accumB__second_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__second_int8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__second_int8)
// C=scalar+B                       GB ((none))
// C=scalar+B'                     GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                     GB ((none))

// C type:   int8_t
// A type:   int8_t
// A pattern? 1
// B type:   int8_t
// B pattern? 0

// BinaryOp: cij = bij

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// (empty: A's values are never read, since SECOND ignores its first input)
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;

// true if values of A are not used
#define GB_A_IS_PATTERN \
    1 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: z = SECOND(x,y) = y
#define GB_BINOP(z,x,y,i,j) \
    z = y ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    1

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SECOND || GxB_NO_INT8 || GxB_NO_SECOND_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__second_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__second_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__second_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above always returns);
    // harmless artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__second_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__second_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__second_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only defined for eWiseUnion
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__second_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__second_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__second_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__second_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// (disabled for SECOND: bind1st of SECOND is just a copy of B)
#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = bij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        ;
        ;
        Cx [p] = y ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    int8_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = aij ;                         \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)   \
{                           \
    ; ;                     \
    Cx [pC] = y ;           \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
convolutiondepthwise_3x3.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static void convdw3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int g=0; g<group; g++) { Mat out = top_blob.channel(g); const float bias0 = bias ? 
bias[g] : 0.f; const float* kernel0 = kernel + g*9; float* outptr = out; float* outptr2 = outptr + outw; const float* img0 = bottom_blob.channel(g); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* r3 = img0 + w*3; #if __ARM_NEON float32x4_t _k012x = vld1q_f32(kernel0); float32x4_t _k345x = vld1q_f32(kernel0+3); float32x4_t _k678x = vld1q_f32(kernel0+6); _k012x = vsetq_lane_f32(0.f, _k012x, 3); _k345x = vsetq_lane_f32(0.f, _k345x, 3); _k678x = vsetq_lane_f32(0.f, _k678x, 3); float32x4_t _bias0 = vdupq_n_f32(bias0); #else const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; #endif // __ARM_NEON int i = 0; for (; i+1 < outh; i+=2) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%3, #192] \n" "ld1 {v9.4s, v10.4s}, [%3] \n" //r0 "add %3, %3, #16 \n" "ext v11.16b, v9.16b, v10.16b, #4 \n" "ext v12.16b, v9.16b, v10.16b, #8 \n" "0: \n" "fmul v7.4s, v9.4s, %14.s[0] \n" "and v13.16b, %17.16b, %17.16b \n" // v13 = _bias0 "fmul v6.4s, v11.4s, %14.s[1] \n" "fmla v13.4s, v12.4s, %14.s[2] \n" "prfm pldl1keep, [%4, #192] \n" "ld1 {v9.4s, v10.4s}, [%4] \n" "add %4, %4, #16 \n" "fmla v7.4s, v9.4s, %15.s[0] \n" "ext v11.16b, v9.16b, v10.16b, #4 \n" "ext v12.16b, v9.16b, v10.16b, #8 \n" "fmla v6.4s, v11.4s, %15.s[1] \n" "fmla v13.4s, v12.4s, %15.s[2] \n" "fmul v8.4s, v9.4s, %14.s[0] \n" "and v15.16b, %17.16b, %17.16b \n" // v15 = _bias0 "fmul v14.4s, v11.4s, %14.s[1] \n" "fmla v15.4s, v12.4s, %14.s[2] \n" "prfm pldl1keep, [%5, #192] \n" "ld1 {v9.4s, v10.4s}, [%5] \n" "add %5, %5, #16 \n" "fmla v7.4s, v9.4s, %16.s[0] \n" "ext v11.16b, v9.16b, v10.16b, #4 \n" "ext v12.16b, v9.16b, v10.16b, #8 \n" "fmla v6.4s, v11.4s, %16.s[1] \n" "fmla v13.4s, v12.4s, %16.s[2] \n" "fmla v8.4s, v9.4s, %15.s[0] \n" "fmla v14.4s, v11.4s, %15.s[1] \n" "fmla v15.4s, v12.4s, %15.s[2] \n" "prfm 
pldl1keep, [%6, #192] \n" "ld1 {v9.4s, v10.4s}, [%6] \n" "add %6, %6, #16 \n" "fmla v8.4s, v9.4s, %16.s[0] \n" "ext v11.16b, v9.16b, v10.16b, #4 \n" "ext v12.16b, v9.16b, v10.16b, #8 \n" "fmla v14.4s, v11.4s, %16.s[1] \n" "fmla v15.4s, v12.4s, %16.s[2] \n" "fadd v7.4s, v7.4s, v6.4s \n" "prfm pldl1keep, [%3, #192] \n" "ld1 {v9.4s, v10.4s}, [%3] \n" //ro, for next loop "fadd v8.4s, v8.4s, v14.4s \n" "fadd v7.4s, v7.4s, v13.4s \n" "fadd v8.4s, v8.4s, v15.4s \n" "ext v11.16b, v9.16b, v10.16b, #4 \n" // for next loop "ext v12.16b, v9.16b, v10.16b, #8 \n" // for next loop "add %3, %3, #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v8.4s}, [%2], #16 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" "sub %3, %3, #16 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(outptr2), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3) // %6 : "0"(nn), "1"(outptr), "2"(outptr2), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "w"(_k012x), // %14 "w"(_k345x), // %15 "w"(_k678x), // %16 "w"(_bias0) // %17 : "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" ); } #else if (nn > 0) { asm volatile( "pld [%3, #192] \n" "vld1.f32 {d18-d20}, [%3 :64] \n"// r0 "add %3, #16 \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "0: \n" "vmul.f32 q7, q9, %e14[0] \n" "vand q13, %q17, %q17 \n"// q13 = _bias0 "vmul.f32 q6, q11, %e14[1] \n" "vmla.f32 q13, q12, %f14[0] \n" "pld [%4, #192] \n" "vld1.f32 {d18-d20}, [%4] \n"// r1 "add %4, #16 \n" "vmla.f32 q7, q9, %e15[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q6, q11, %e15[1] \n" "vmla.f32 q13, q12, %f15[0] \n" "vmul.f32 q8, q9, %e14[0] \n" "vand q15, %q17, %q17 \n"// q15 = _bias0 "vmul.f32 q14, q11, %e14[1] \n" "vmla.f32 q15, q12, %f14[0] \n" "pld [%5, #192] \n" "vld1.f32 {d18-d20}, [%5 :64] \n"// r2 "add %5, #16 \n" "vmla.f32 q7, q9, %e16[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q6, q11, %e16[1] \n" "vmla.f32 q13, q12, %f16[0] \n" "vmla.f32 q8, q9, %e15[0] \n" 
"vmla.f32 q14, q11, %e15[1] \n" "vmla.f32 q15, q12, %f15[0] \n" "pld [%6, #192] \n" "vld1.f32 {d18-d20}, [%6] \n"// r3 "add %6, #16 \n" "vmla.f32 q8, q9, %e16[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q14, q11, %e16[1] \n" "vmla.f32 q15, q12, %f16[0] \n" "vadd.f32 q7, q7, q6 \n" "pld [%3, #192] \n" "vld1.f32 {d18-d20}, [%3 :64] \n"// r0 "vadd.f32 q8, q8, q14 \n" "vadd.f32 q7, q7, q13 \n" "vadd.f32 q8, q8, q15 \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "add %3, #16 \n" "vst1.f32 {d14-d15}, [%1]! \n" "vst1.f32 {d16-d17}, [%2]! \n" "subs %0, #1 \n" "bne 0b \n" "sub %3, #16 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(outptr2), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3) // %6 : "0"(nn), "1"(outptr), "2"(outptr2), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "w"(_k012x), // %14 "w"(_k345x), // %15 "w"(_k678x), // %16 "w"(_bias0) // %17 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _sum = vmulq_f32(_r00, _k012x); _sum = vmlaq_f32(_sum, _r10, _k345x); _sum = vmlaq_f32(_sum, _r20, _k678x); float32x4_t _sum2 = vmulq_f32(_r10, _k012x); _sum2 = vmlaq_f32(_sum2, _r20, _k345x); _sum2 = vmlaq_f32(_sum2, _r30, _k678x); _sum = vsetq_lane_f32(bias0, _sum, 3); _sum2 = vsetq_lane_f32(bias0, _sum2, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); *outptr2 = vaddvq_f32(_sum2); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2)); float32x2_t _sss2 = vpadd_f32(_ss, _ss2); *outptr = vget_lane_f32(_sss2, 0); *outptr2 = vget_lane_f32(_sss2, 1); #endif // __aarch64__ #else float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum 
+= r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; float sum2 = bias0; sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; *outptr = sum; *outptr2 = sum2; #endif r0++; r1++; r2++; r3++; outptr++; outptr2++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr += outw; outptr2 += outw; } for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #192] \n" "ld1 {v8.4s, v9.4s}, [%2] \n" //r0 "add %2, %2, #16 \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "ext v11.16b, v8.16b, v9.16b, #8 \n" "0: \n" "fmul v7.4s, v8.4s, %10.s[0] \n" "and v14.16b, %13.16b, %13.16b \n" // v14 = _bias0 "fmul v13.4s, v10.4s, %10.s[1] \n" "fmla v14.4s, v11.4s, %10.s[2] \n" "prfm pldl1keep, [%3, #192] \n" "ld1 {v8.4s, v9.4s}, [%3] \n" //r1 "add %3, %3, #16 \n" "fmla v7.4s, v8.4s, %11.s[0] \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "ext v11.16b, v8.16b, v9.16b, #8 \n" "fmla v13.4s, v10.4s, %11.s[1] \n" "fmla v14.4s, v11.4s, %11.s[2] \n" "prfm pldl1keep, [%4, #192] \n" "ld1 {v8.4s, v9.4s}, [%4] \n" //r2 "add %4, %4, #16 \n" "fmla v7.4s, v8.4s, %12.s[0] \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "ext v11.16b, v8.16b, v9.16b, #8 \n" "fmla v13.4s, v10.4s, %12.s[1] \n" "fmla v14.4s, v11.4s, %12.s[2] \n" "prfm pldl1keep, [%2, #192] \n" "ld1 {v8.4s, v9.4s}, [%2] \n" //r0, for next loop "add %2, %2, #16 \n" "fadd v7.4s, v7.4s, v13.4s \n" "fadd v7.4s, v7.4s, v14.4s \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" // for next loop "ext v11.16b, v8.16b, v9.16b, #8 \n" // for next loop "st1 {v7.4s}, [%1], #16 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" "sub %2, %2, #16 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 
"=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k012x), // %10 "w"(_k345x), // %11 "w"(_k678x), // %12 "w"(_bias0) // %13 : "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" ); } #else if (nn > 0) { asm volatile( "pld [%2, #192] \n" "vld1.f32 {d16-d18}, [%2] \n"// r0 "add %2, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "0: \n" "vmul.f32 q7, q8, %e10[0] \n" "vand q14, %q13, %q13 \n"// q14 = _bias0 "vmul.f32 q13, q10, %e10[1] \n" "vmla.f32 q14, q11, %f10[0] \n" "pld [%3, #192] \n" "vld1.f32 {d16-d18}, [%3] \n"// r1 "add %3, #16 \n" "vmla.f32 q7, q8, %e11[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q13, q10, %e11[1] \n" "vmla.f32 q14, q11, %f11[0] \n" "pld [%4, #192] \n" "vld1.f32 {d16-d18}, [%4] \n"// r2 "add %4, #16 \n" "vmla.f32 q7, q8, %e12[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q13, q10, %e12[1] \n" "vmla.f32 q14, q11, %f12[0] \n" "pld [%2, #192] \n" "vld1.f32 {d16-d18}, [%2] \n"// r0 "add %2, #16 \n" "vadd.f32 q7, q7, q13 \n" "vadd.f32 q7, q7, q14 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vst1.f32 {d14-d15}, [%1]! 
\n" "subs %0, #1 \n" "bne 0b \n" "sub %2, #16 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k012x), // %10 "w"(_k345x), // %11 "w"(_k678x), // %12 "w"(_bias0) // %13 : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _sum = vmulq_f32(_r00, _k012x); _sum = vmlaq_f32(_sum, _r10, _k345x); _sum = vmlaq_f32(_sum, _r20, _k678x); _sum = vsetq_lane_f32(bias0, _sum, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); *outptr = vget_lane_f32(_ss, 0); #endif // __aarch64__ #else float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr = sum; #endif r0++; r1++; r2++; outptr++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const int tailstep = w - 2*outw + w; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int g=0; g<group; g++) { Mat out = top_blob.channel(g); const float bias0 = bias ? 
bias[g] : 0.f; const float* kernel0 = kernel + g*9; float* outptr = out; const float* img0 = bottom_blob.channel(g); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; #if __ARM_NEON float32x4_t _k012x = vld1q_f32(kernel0); float32x4_t _k345x = vld1q_f32(kernel0+3); float32x4_t _k678x = vld1q_f32(kernel0+6); _k012x = vsetq_lane_f32(0.f, _k012x, 3); _k345x = vsetq_lane_f32(0.f, _k345x, 3); _k678x = vsetq_lane_f32(0.f, _k678x, 3); float32x4_t _bias0 = vdupq_n_f32(bias0); #else const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; #endif // __ARM_NEON int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #256] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "and v11.16b, %13.16b, %13.16b \n" // v11 = _bias0 "0: \n" "fmul v0.4s, v2.4s, %10.s[0] \n" "fmul v10.4s, v3.4s, %10.s[1] \n" "prfm pldl1keep, [%2, #256] \n" "ld2 {v8.4s, v9.4s}, [%2] \n" "ext v1.16b, v2.16b, v8.16b, #4 \n" "fmla v11.4s, v1.4s, %10.s[2] \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v2.4s, v3.4s}, [%3], #32 \n" "fmla v0.4s, v2.4s, %11.s[0] \n" "fmla v10.4s, v3.4s, %11.s[1] \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v8.4s, v9.4s}, [%3] \n" "ext v1.16b, v2.16b, v8.16b, #4 \n" "fmla v11.4s, v1.4s, %11.s[2] \n" "prfm pldl1keep, [%4, #256] \n" "ld2 {v2.4s, v3.4s}, [%4], #32 \n" "fmla v0.4s, v2.4s, %12.s[0] \n" "fmla v10.4s, v3.4s, %12.s[1] \n" "prfm pldl1keep, [%4, #256] \n" "ld2 {v8.4s, v9.4s}, [%4] \n" "ext v1.16b, v2.16b, v8.16b, #4 \n" "fmla v11.4s, v1.4s, %12.s[2] \n" "prfm pldl1keep, [%2, #256] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "fadd v0.4s, v0.4s, v10.4s \n" "fadd v0.4s, v0.4s, v11.4s \n" "and v11.16b, %13.16b, %13.16b \n" // v11 = _bias0 "subs %w0, %w0, #1 \n" "st1 {v0.4s}, [%1], #16 \n" "bne 0b \n" "sub %2, %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 
"=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k012x), // %10 "w"(_k345x), // %11 "w"(_k678x), // %12 "w"(_bias0) // %13 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" ); } #else if (nn > 0) { asm volatile( "pld [%2, #256] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vand q11, %q13, %q13 \n" "0: \n" "vmul.f32 q0, q2, %e10[0] \n" "vmul.f32 q10, q3, %e10[1] \n" "pld [%2, #128] \n" "vld2.f32 {d16-d17}, [%2] \n" "vext.32 q1, q2, q8, #1 \n" "vmla.f32 q11, q1, %f10[0] \n" "pld [%3, #256] \n" "vld2.f32 {d4-d7}, [%3]! \n" "vmla.f32 q0, q2, %e11[0] \n" "vmla.f32 q10, q3, %e11[1] \n" "pld [%3, #128] \n" "vld2.f32 {d16-d17}, [%3] \n" "vext.32 q1, q2, q8, #1 \n" "vmla.f32 q11, q1, %f11[0] \n" "pld [%4, #256] \n" "vld2.f32 {d4-d7}, [%4]! \n" "vmla.f32 q0, q2, %e12[0] \n" "vmla.f32 q10, q3, %e12[1] \n" "pld [%4, #128] \n" "vld2.f32 {d16-d17}, [%4] \n" "vext.32 q1, q2, q8, #1 \n" "vmla.f32 q11, q1, %f12[0] \n" "pld [%2, #256] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vadd.f32 q0, q0, q10 \n" "vadd.f32 q0, q0, q11 \n" "vand q11, %q13, %q13 \n" "subs %0, #1 \n" "vst1.f32 {d0-d1}, [%1]! 
\n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k012x), // %10 "w"(_k345x), // %11 "w"(_k678x), // %12 "w"(_bias0) // %13 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _sum = vmulq_f32(_r00, _k012x); _sum = vmlaq_f32(_sum, _r10, _k345x); _sum = vmlaq_f32(_sum, _r20, _k678x); _sum = vsetq_lane_f32(bias0, _sum, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); *outptr = vget_lane_f32(_ss, 0); #endif // __aarch64__ #else float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr = sum; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
/* ========================= file: main_github.c ========================= */
#include <stdio.h> #include <math.h> #include <gsl/gsl_sf_bessel.h> #include "global.h" #include "utility.h" #include "constants.h" #include <gsl/gsl_cblas.h> #include <time.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <omp.h> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include "steppmethods.h" #define NUMBER_SAMPLES 1 #define NUMBER_VOLUMES 1 #define SHIFT_LATE 200 #define BINS 100 #define PDF_NR 12 // regular main but with multiple sampling runs over multiple volume fractions phi. int main (void){ FILE *source, *target; source = fopen("global.h", "r"); // get source global.h at program start so as to not copy edits during run if( source == NULL ) { printf("cannot open global.h\n"); exit(EXIT_FAILURE); } SETNUMTHREADS int i,j,l,k,m ,n; Constants(); // draw coupling constants and other derived constants double *t = calloc(LENGTH_T, sizeof(double)); time_t tme = time(NULL); char ordnerLABEL[300]; char ueberordnerLABEL[300]; char ordnerLABEL_original[300]; strftime(ordnerLABEL, sizeof ordnerLABEL, "%A %c", localtime(&tme)); double squares_all_calcs[LENGTH_T][NUMBER_SAMPLES]; // for printing all squares in shared table i = 0; printf("\n%s\n", ordnerLABEL); deleteSpaces(ordnerLABEL,ordnerLABEL); strcpy (ordnerLABEL_original, ordnerLABEL); // keep original, change ordnerLABEL per run double alpha_global = ALPHA; double temp_global = TEMP; // macros from global make trouble when used as print arguments, hence copy to memory sprintf(ueberordnerLABEL,"N=%d_SIM=%d_T=%1.1E_ALPH=%1.1f", OSSZI, SIMULATIONEN, temp_global, alpha_global); char tempLABEL[300]; strcpy (tempLABEL, ordnerLABEL); strcat(tempLABEL, LABELPREFIX); strcat(tempLABEL, ueberordnerLABEL); strcpy (ueberordnerLABEL, tempLABEL); struct stat st2 = {0}; double prob_density[BINS][PDF_NR]; for (i = 0; i < BINS ; i++) { for (j = 0; j < PDF_NR; j++) { prob_density[i][j] = 0.0; } } int check_times[PDF_NR]; for (j = 0; j < PDF_NR; j++) { check_times[j] = (int) ( 10.0 * (j+1)) ; 
} check_times[PDF_NR-1] = LENGTH_T - 1; for (j = 0; j < PDF_NR; j++) { printf("check times nr %d is %d \n", j, check_times[j]); } printf("\nprint results into "); printf(ueberordnerLABEL); if (stat(ueberordnerLABEL, &st2) == -1) { //Teste ob Ordner existiert, erstelle Ordner mkdir(ueberordnerLABEL, 0700); } if (chdir(ueberordnerLABEL)) // change into directory of simulation { printf("Error changing directory"); return 1; } // reserviere speicher fuer eine Vollständige trajektorie ------------------ m = LENGTH_T; n = ORDER; double **z = (double **) malloc(m * sizeof(double *)); z[0] = (double *) malloc(m* n * sizeof(double)); for (i=1; i<m; i++) z[i] = z[0] + n *i; double *squares = calloc(LENGTH_T, sizeof(double)); double *short_correlation_x = calloc(LENGTH_T, sizeof(double)); //short term (1 timestep) correlations double *short_correlation_y = calloc(LENGTH_T, sizeof(double)); double *total_momentum_x = calloc(LENGTH_T, sizeof(double)); double *total_momentum_y = calloc(LENGTH_T, sizeof(double)); double *long_correlation_x = calloc(LENGTH_T, sizeof(double)); //longterm correlation from first timestep double *long_correlation_y = calloc(LENGTH_T, sizeof(double)); double *squares_increase = calloc(LENGTH_T, sizeof(double)); // Increase a from assumed form x^2 0 = a t double real_volumes[10]; double *bathp = calloc(LENGTH_T, sizeof(double)); double *bathq = calloc(LENGTH_T, sizeof(double)); double *px_correlation = calloc(LENGTH_T, sizeof(double)); // save correlation of impulse_x to px(t=0) double *py_correlation = calloc(LENGTH_T, sizeof(double)); double *px_correlation_late = calloc(LENGTH_T, sizeof(double)); // corelation after time SHIFT_LATE double *px_correlation_late2 = calloc(LENGTH_T, sizeof(double)); // corelation after time SHIFT_LATE int n_zplots = SIMULATIONEN; if (SIMULATIONEN > MAX_NR_PLOTS ) n_zplots = MAX_NR_PLOTS; // speichere maximal 30*DIM trajectorien // reserviere speicher fuer samplepfade massives Teilchen zum plotten ------------------ m = 
LENGTH_T; n = DIM*n_zplots; double **zplots = (double **) malloc(m * sizeof(double *)); zplots[0] = (double *) malloc(m* n * sizeof(double)); for (i=1; i<m; i++) zplots[i] = zplots[0] + n *i; // reserviere speicher fuer samplepfade 1. bad Teilchen zum plotten ------------------ m = LENGTH_T; n = DIM*n_zplots; double **qplots = (double **) malloc(m * sizeof(double *)); qplots[0] = (double *) malloc(m* n * sizeof(double)); for (i=1; i<m; i++) qplots[i] = qplots[0] + n *i; // reserviere speicher fuer samplepfade letztes bad Teilchen zum plotten ------------------ m = LENGTH_T; n = DIM*n_zplots; double **qlplots = (double **) malloc(m * sizeof(double *)); qlplots[0] = (double *) malloc(m* n * sizeof(double)); for (i=1; i<m; i++) qlplots[i] = qlplots[0] + n *i; // reserviere speicher fuer samplepfade zusätzlicher Kac_Zwanzig Kraft Term ------------------ m = LENGTH_T; n = DIM*n_zplots; double **Zwanzig_Force = (double **) malloc(m * sizeof(double *)); Zwanzig_Force[0] = (double *) malloc(m* n * sizeof(double)); for (i=1; i<m; i++) Zwanzig_Force[i] = Zwanzig_Force[0] + n *i; // reserviere speicher fuer probability Density ------------------ m = 80; n = 80; double **P_density = (double **) malloc(m * sizeof(double *)); P_density[0] = (double *) malloc(m* n * sizeof(double)); for (i=1; i<m; i++) P_density[i] = P_density[0] + n *i; // reserviere speicher fuer Gitterkoordinaten ------------------ double DENSITY_POINTS = 0.0; for(m=0; m<80; m++) { for(n=0; n<80; n++) { P_density[m][n] = 0.0; } } double *EKIN = calloc(LENGTH_T, sizeof(double)); double *EBAD = calloc(LENGTH_T, sizeof(double)); double *ETOT = calloc(LENGTH_T, sizeof(double)); double *PTOT = calloc(LENGTH_T, sizeof(double)); double *PTOTY = calloc(LENGTH_T, sizeof(double)); double *LTOT = calloc(LENGTH_T, sizeof(double)); // GSL random number Setup for Taus Generator const gsl_rng_type * T; gsl_rng * r; gsl_rng_env_setup(); T = gsl_rng_ranlxd2; r = gsl_rng_alloc (T); gsl_rng_set(r, time(NULL)); // Seed with time 
// RNG setup End // // GSL random number Setup for Taus Generator for global rand const gsl_rng_type * T2; T2 = gsl_rng_ranlxd2; RAND_GLOBAL = gsl_rng_alloc (T2); gsl_rng_set(RAND_GLOBAL, time(NULL)); // Seed with time // RNG setup End // clock_t start, end; printf("evenly spread t \n"); //set up time vec for (i = 0; i < 10; i++) // first ten values in small steps { t[i] = i * TIME_STEPS/10.0; } for (i = 10; i < LENGTH_T; i++) { t[i] = (i-9) * TIME_STEPS; } double squarespace = 0.0; DIFF_COEFF = KBOLTZ * TEMP/GAMMA * sin(M_PI * ALPHA ) / (M_PI * ALPHA); if(ALPHA > 0.95) { DIFF_COEFF = KBOLTZ * TEMP/GAMMA; } LATTICE_SPACING = sqrt(2*DIM*DIFF_COEFF * pow( TIME_ARRIVAL, ALPHA) ); printf("LATTICE_SPACING = %3.3E\n", LATTICE_SPACING); double latlength = LATTICE_SPACING; //------------------------------EIGENTLICHE SIMULATION ------------------------------------------------------------------ int sim_count = 0; start = clock(); time_first_contact = 0.0; int error_count = 0; // for (i = 0; i < SIMULATIONEN; i++) { vec_zero_i(lattice_position,DIM); // set stating cell to zero ! 
TARGET_CONTACT = 0; FLAG_DUMP_VALUE = 0; // set to 1 if error occures // -------------------------BAD Bath_Setup(y, LABEL, r); // draw intial conditions //---------------------Bad aufgesetzt VVerlet_parallel(ORDER, y, z, t, Poti_Handle); end = clock(); //------------------------------auswertung Integrationsergebnisse if ( !(FLAG_DUMP_VALUE)) // only count result without err { double *kahan_c = calloc(LENGTH_T, sizeof(double)); // double *kahan_t = calloc(LENGTH_T, sizeof(double)); double *kahan_y = calloc(LENGTH_T, sizeof(double)); #pragma omp parallel for for(l = 0; l < LENGTH_T; l++) { if (l>10) { long_correlation_x[l] += z[1][0] *z[l][0]/((double) SIMULATIONEN); long_correlation_y[l] += z[1][1] *z[l][1]/((double) SIMULATIONEN); } kahan_y[l] = z[10][DIM] * z[l][DIM] / ((double) SIMULATIONEN) / (mass * KBOLTZ * TEMP)\ - kahan_c[l]; kahan_t[l] = px_correlation[l] + kahan_y[l]; kahan_c[l] = (kahan_y[l] - px_correlation[l]) - kahan_y[l]; px_correlation[l] = kahan_t[l]; py_correlation[l] += z[10][DIM + 1] * z[l][DIM + 1] / ((double) SIMULATIONEN) / (mass * KBOLTZ * TEMP); px_correlation_late[l] += z[SHIFT_LATE][DIM] * z[l][DIM] / ((double) SIMULATIONEN) / (mass * KBOLTZ * TEMP); px_correlation_late2[l] += z[SHIFT_LATE * 2][DIM] * z[l][DIM] / ((double) SIMULATIONEN) / (mass * KBOLTZ * TEMP); for(j=0; j< DIM; j++) { if (i < n_zplots) { zplots[l][DIM * i + j] = z[l][j]; qplots[l][DIM * i + j] = z[l][2*DIM + j]; double Force_TEMP = 0.0; for(int os = 0; os < OSSZI; os++) { Force_TEMP += (pow(coupling[os],2.0)/pow(ommega[os],2.0) - coupling[os]) * z[l][(2 + 2 * os) * DIM + j]; } Zwanzig_Force[l][DIM * i + j]=Force_TEMP; int i_last = OSSZI -1; qlplots[l][DIM * i + j] = z[l][2*OSSZI*DIM + j]; } squares[l] = squares[l] + pow(z[l][j],2.0)/((double) SIMULATIONEN); EKIN[l] = EKIN[l] + pow(z[l][DIM+j],2.0)/(mass * SIMULATIONEN * KBOLTZ *TEMP * DIM); for(k=0;k<OSSZI;k++) { bathq[l] = bathq[l] + pow(z[l][(2 + 2*k) *DIM +j],2.0)/( (double) (OSSZI*SIMULATIONEN)\ *KBOLTZ*TEMP) * 
pow(ommega[k],2.0)/2.0;// mittlere potentielle E pro Badteilchen //reskaliert durch kT bathp[l] = bathp[l] + pow(z[l][(3 + 2*k) *DIM +j],2.0)/( (double) (OSSZI*SIMULATIONEN)\ *KBOLTZ*TEMP) /2.0/massq[k]; // mittlere kinetische E pro Badteilchen //reskaliert durch kT EBAD[l] = EBAD[l] + pow(z[l][(3 + 2*k) *DIM +j], 2.0)/2.0\ + 0.5 * pow(ommega[k],2.0) \ * pow(z[l][(2 + 2*k) *DIM +j] - coupling[k]/(pow(ommega[k],2.0) ) * z[l][j] , 2.0) /((double) SIMULATIONEN); } } for(k=0;k<OSSZI;k++) // add p and angular momentum L for bath { PTOT[l] += z[l][(3 + 2*k) *DIM + 0]; PTOTY[l] += z[l][(3 + 2*k) *DIM + 1]; LTOT[l] += (z[l][(2 + 2*k) *DIM + 0] * z[l][(3 + 2*k) *DIM + 1]\ - z[l][(2 + 2*k) *DIM + 1] * z[l][(3 + 2*k) *DIM + 0]) /((double) SIMULATIONEN); } PTOT[l] += z[l][2]; PTOT[l] += z[l][3]; LTOT[l] += (z[l][0] * z[l][3] - z[l][1] * z[l][2]) / ((double) SIMULATIONEN); } if(DIM == 1) { // setup pdf checks double bins = BINS * 1.0; double dx = LATTICE_SPACING/bins; double sims = SIMULATIONEN; for (j = 0; j < PDF_NR; j++) { int t_check = check_times[j]; for (int i_bin = 0; i_bin < BINS; i_bin++) { double lower = -LATTICE_SPACING / 2.0 + i_bin * dx; double upper = -LATTICE_SPACING / 2.0 + (i_bin + 1) * dx; if ( ( z[t_check][0] <= upper) && ( z[t_check][0] > lower) ) { prob_density[i_bin][j] += 1.0/sims; } } } } sim_count += 1; //printf("%d und t %4.2f \n", i ,((double) (end - start))); printf("\r%d von %d mit t/count = %4.2f s und average t_rest =%4.2f h ", sim_count , SIMULATIONEN, (double) (end - start) / sim_count/ THREADS / CLOCKS_PER_SEC ,\ ((double) (end - start) / sim_count/ THREADS/ CLOCKS_PER_SEC * (SIMULATIONEN - sim_count)/3600)); printf(" %d hits on Lat and avrgtcntct = %4.2f",TARGET_CONTACT, time_first_contact/(i+1) ); fflush(stdout); for(l = 0; l < LENGTH_T; l++) { for (int oss_i=0; oss_i < OSSZI; oss_i++) { total_momentum_x[l] += z[l][(3 + 2*oss_i) *DIM + 0]; total_momentum_y[l] += z[l][(3 + 2*oss_i) *DIM + 1]; } total_momentum_x[l] += z[l][(0 + 2*0) *DIM + 0]; 
total_momentum_y[l] += z[l][(0 + 2*0) *DIM + 1]; } // -------------- end evaluation if---------------------- free(kahan_y); free(kahan_t); free(kahan_c); }else { error_count++; printf("\n err occured at calc %d, calculation dumped, %d totatl errors\n", i,error_count ); i -= 1; // do one more calculation } } tme = time(NULL); char end_label[90]; strftime( end_label, sizeof end_label, "%A %c", localtime(&tme)); printf("\n%s\n", end_label); for(l = 0; l < LENGTH_T; l++) { ETOT[l] = EKIN[l] + EBAD[l]; PTOT[l] = sqrt(pow(PTOT[l]/((double) SIMULATIONEN),2.0) + pow(PTOTY[l]/((double) SIMULATIONEN),2.0)); } for(l = 1; l < LENGTH_T; l++) { squares_increase[l] = (squares[l]- squares[l-1])/(t[l] - t[l-1]); } //------------------------------ENDE SIMULATION, speichere daten ------------------------------------------------------------------ for(m=0; m<80; m++) { for(n=0; n<80; n++) { P_density[m][n] *= 1.0/DENSITY_POINTS; } } FILE *fp; struct stat st = {0}; if (stat(ordnerLABEL, &st) == -1){ //Teste ob Ordner existiert, erstelle Ordner mkdir(ordnerLABEL, 0700); } fp = fopen ("shellscript.sh", "w"); //create Shellscript to start Gnuplot from c main() fprintf(fp, "cd %s\n", ordnerLABEL); fprintf(fp, "gnuplot gnuplot.txt\n"); fprintf(fp, "cd .."); fclose (fp); // copy global.h / // copy to same name into directory, clould be anything different if (chdir(ordnerLABEL)) // change into directory of simulation { printf("Error changing directory"); return 1; } mkdir("plots", 0700); mkdir("trajec", 0700); target = fopen("global.h", "w"); if( target == NULL ) { fclose(source); printf("Press any key to exit...\n"); exit(EXIT_FAILURE); } char ch; while( ( ch = fgetc(source) ) != EOF ) { fputc(ch, target); } printf("File copied successfully.\n"); fclose(target); fp = fopen ("latlength.dat", "w"); fprintf(fp, "%lf \n", latlength); fclose (fp); fp = fopen ("squares_rohdaten.dat", "w"); for(l = 0; l < LENGTH_T; l++){ fprintf(fp, "%1.3E %1.3E\n", t[l], squares[l]); } fclose (fp); fp = fopen 
("ekin.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%lf %lf\n", t[l]/TIME_ARRIVAL, EKIN[l]); } fclose (fp); fp = fopen ("ekinbath.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%lf %lf\n", t[l]/TIME_ARRIVAL, bathp[l]); } fclose (fp); fp = fopen ("ommega.dat", "w"); for(l = 0; l < OSSZI; l++){ fprintf(fp, "%lf\n", ommega[l]); } fclose (fp); fp = fopen ("PTOT.dat", "w"); for(l = 0; l < LENGTH_T; l++){ fprintf(fp, "%lf %lf\n", t[l]/TIME_ARRIVAL, PTOT[l]); } fclose (fp); fp = fopen ("P_X.dat", "w"); for(l = 0; l < LENGTH_T; l++){ fprintf(fp, "%lf %1.3e\n", t[l]/TIME_ARRIVAL, total_momentum_x[l]); } fclose (fp); fp = fopen ("P_Y.dat", "w"); for(l = 0; l < LENGTH_T; l++){ fprintf(fp, "%lf %1.3e\n", t[l]/TIME_ARRIVAL, total_momentum_y[l]); } fclose (fp); fp = fopen ("xshortcorellation.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%lf %lf\n", t[l]/TIME_ARRIVAL, short_correlation_x[l]/latlength/latlength); } fclose (fp); fp = fopen ("yshortcorellation.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%lf %lf\n", t[l]/TIME_ARRIVAL, short_correlation_y[l]/latlength/latlength); } fclose (fp); fp = fopen ("xlongcorellation.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%lf %lf\n", t[l]/TIME_ARRIVAL, long_correlation_x[l]/latlength/latlength); } fclose (fp); fp = fopen ("ylongcorellation.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%lf %lf\n", t[l]/TIME_ARRIVAL, long_correlation_y[l]/latlength/latlength); } fclose (fp); fp = fopen ("PX_corr.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%1.3E %1.3E\n", t[l]/TIME_ARRIVAL, px_correlation[l]); } fclose (fp); fp = fopen ("PY_corr.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%1.3E %1.3E\n", t[l]/TIME_ARRIVAL, py_correlation[l]); } fclose (fp); fp = fopen ("PX_corr_late.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%1.3E %1.3E\n", t[l]/TIME_ARRIVAL, px_correlation_late[l]); } fclose (fp); fp = fopen ("PX_corr_late2.dat", "w"); for(l = 1; l < 
LENGTH_T; l++){ fprintf(fp, "%1.3E %1.3E\n", t[l]/TIME_ARRIVAL, px_correlation_late2[l]); } fclose (fp); fp = fopen ("PX_corr_abs.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%1.3E %1.3E\n", t[l]/TIME_ARRIVAL, fabs(px_correlation[l])); } fclose (fp); fp = fopen ("PX_corr_late_abs.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%1.3E %1.3E\n", t[l]/TIME_ARRIVAL, fabs(px_correlation_late[l])); } fclose (fp); fp = fopen ("PX_corr_late2_abs.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%1.3E %1.3E\n", t[l]/TIME_ARRIVAL, fabs(px_correlation_late2[l])); } fclose (fp); if (DIM == 1) { fp = fopen ("PDF_1D.dat", "w"); for (int pos = 0; pos < BINS; pos++) { fprintf(fp, "%1.3E ", -LATTICE_SPACING/2.0 + pos*LATTICE_SPACING/((double) BINS)); for(j = 0; j < PDF_NR; j++) { fprintf(fp, "%1.3E ", prob_density[pos][j]); } fprintf(fp,"\n"); } fclose (fp); } // finde größtes s^2/t^alpha zum endzeitpunkt int t_start = 10; double temp_ende = 0.0; for(int i_alpha = 1; i_alpha < 12; i_alpha ++){ double alpha_temp = 0.45 + 0.05 * i_alpha; l = LENGTH_T-1; if (temp_ende < (squares[l] / (pow(t[l], alpha_temp))) ) { temp_ende = squares[l] / (pow(t[l], alpha_temp)); } } if (DIM>1) { char zplotsLABEL[30]; for (i = 0; i < n_zplots; i++) { sprintf(zplotsLABEL, "trajec/zplots%d.dat",i); fp = fopen (zplotsLABEL, "w"); for(l = 0; l < LENGTH_T; l++){ for (j = 0; j < DIM ; j++){ fprintf(fp, "%lf ", ((zplots[l][j +i*DIM])/latlength) ); } fprintf(fp, "\n"); } fclose (fp); } for (i = 0; i < n_zplots; i++) { sprintf(zplotsLABEL, "trajec/qplots%d.dat",i); fp = fopen (zplotsLABEL, "w"); for(l = 0; l < LENGTH_T; l++){ for (j = 0; j < DIM ; j++){ fprintf(fp, "%lf ", ((qplots[l][j +i*DIM])) ); } fprintf(fp, "\n"); } fclose (fp); } for (i = 0; i < n_zplots; i++) { sprintf(zplotsLABEL, "trajec/Zwanzig_Force%d.dat",i); fp = fopen (zplotsLABEL, "w"); for(l = 0; l < LENGTH_T; l++){ for (j = 0; j < DIM ; j++){ fprintf(fp, "%lf ", ((Zwanzig_Force[l][j +i*DIM])) ); } fprintf(fp, "\n"); } 
fclose (fp); } for (i = 0; i < n_zplots; i++) { sprintf(zplotsLABEL, "trajec/qlplots%d.dat",i); fp = fopen (zplotsLABEL, "w"); for(l = 0; l < LENGTH_T; l++){ for (j = 0; j < DIM ; j++){ fprintf(fp, "%lf ", ((qlplots[l][j +i*DIM])) ); } fprintf(fp, "\n"); } fclose (fp); } } if (DIM==1) { char zplotsLABEL[30]; for (i = 0; i < n_zplots; i++) { sprintf(zplotsLABEL, "trajec/zplots%d.dat",i); fp = fopen (zplotsLABEL, "w"); for(l = 0; l < LENGTH_T; l++){ fprintf(fp, " %lf %lf \n" ,t[l]/TIME_ARRIVAL, zplots[l][i*DIM]/latlength); } fclose (fp); } } fp = fopen ("ETOT.dat", "w"); for(l = 0; l < LENGTH_T; l++){ fprintf(fp, "%lf %lf\n", t[l], ETOT[l]); } fclose (fp); fp = fopen ("DENSITY.dat", "w"); for(m=0; m<80; m++) { for(n=0; n<80; n++) { fprintf(fp, "%1.2e ", P_density[m][n]); } fprintf(fp, "\n"); } fclose (fp); gsl_rng_free (r); gsl_rng_free (RAND_GLOBAL); free(zplots[0]); free(zplots); free(qplots[0]); free(qplots); free(Zwanzig_Force[0]); free(Zwanzig_Force); free(qlplots[0]); free(qlplots); free (P_density[0]); free (P_density); free(t); free(bathp); free(bathq); free(squares_increase); free(short_correlation_x); free(total_momentum_x); free(total_momentum_y); free(short_correlation_y); free(long_correlation_x); free(px_correlation); free(py_correlation); free(px_correlation_late); free(px_correlation_late2); free(long_correlation_y); free(ETOT); free(EKIN); free(EBAD); free(PTOT); free(PTOTY);free(LTOT); free(squares); free(z[0]); free(z); }
/* ========================= file: csr.c ========================= */
/*!
 * \file
 *
 * \brief Various routines with dealing with CSR matrices
 *
 * \author George Karypis
 * \version\verbatim $Id: csr.c 13437 2013-01-11 21:54:10Z karypis $ \endverbatim
 */

#include "gklib/GKlib.h"

/* work-size threshold below which OpenMP parallelization is not worthwhile */
#define OMPMINOPS 50000

/*************************************************************************/
/*! Allocate memory for a CSR matrix and initializes it
    \returns the allocated matrix. The various fields are set to NULL.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Create()
{
  gk_csr_t *mat;

  /* NOTE(review): gk_malloc is assumed to abort/longjmp on failure, so mat
     is not NULL-checked here -- confirm against GKlib's error policy */
  mat = (gk_csr_t *)gk_malloc(sizeof(gk_csr_t), "gk_csr_Create: mat");

  gk_csr_Init(mat);

  return mat;
}


/*************************************************************************/
/*! Initializes the matrix
    \param mat is the matrix to be initialized.
*/
/*************************************************************************/
void gk_csr_Init(gk_csr_t *mat)
{
  /* zero every field (all pointers become NULL), then mark the dimensions
     as "not yet set" with the -1 sentinel */
  memset(mat, 0, sizeof(gk_csr_t));
  mat->nrows = mat->ncols = -1;
}


/*************************************************************************/
/*! Frees all the memory allocated for matrix.
    \param mat is the address of the matrix pointer to be freed;
       a *mat of NULL is a no-op.
*/
/*************************************************************************/
void gk_csr_Free(gk_csr_t **mat)
{
  if (*mat == NULL)
    return;
  /* release the field arrays first, then the struct itself */
  gk_csr_FreeContents(*mat);
  gk_free((void **)mat, LTERM);
}


/*************************************************************************/
/*! Frees only the memory allocated for the matrix's different fields and
    sets them to NULL.
    \param mat is the matrix whose contents will be freed.
*/
/*************************************************************************/
void gk_csr_FreeContents(gk_csr_t *mat)
{
  /* Every optional field array is handed to gk_free as an LTERM-terminated
     list of pointer addresses.
     NOTE(review): upstream GKlib casts the first argument to (void **);
     the (void *) cast here relies on implicit conversion -- confirm that
     gk_free's first parameter accepts it on all target compilers. */
  gk_free((void *)&mat->rowptr, &mat->rowind, &mat->rowval, &mat->rowids,
          &mat->colptr, &mat->colind, &mat->colval, &mat->colids,
          &mat->rnorms, &mat->cnorms, &mat->rsums, &mat->csums,
          &mat->rsizes, &mat->csizes, &mat->rvols, &mat->cvols,
          &mat->rwgts, &mat->cwgts,
          LTERM);
}


/*************************************************************************/
/*! Returns a copy of a matrix.
    \param mat is the matrix to be duplicated.
    \returns the newly created copy of the matrix. Only the fields that
       are present in the source are allocated and copied.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Dup(gk_csr_t *mat)
{
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = mat->nrows;
  nmat->ncols = mat->ncols;

  /* copy the row structure */
  if (mat->rowptr)
    nmat->rowptr = gk_zcopy(mat->nrows+1, mat->rowptr,
                            gk_zmalloc(mat->nrows+1, "gk_csr_Dup: rowptr"));
  if (mat->rowids)
    nmat->rowids = gk_icopy(mat->nrows, mat->rowids,
                            gk_imalloc(mat->nrows, "gk_csr_Dup: rowids"));
  if (mat->rnorms)
    nmat->rnorms = gk_fcopy(mat->nrows, mat->rnorms,
                            gk_fmalloc(mat->nrows, "gk_csr_Dup: rnorms"));
  /* rowptr[nrows] is the total number of stored non-zeros */
  if (mat->rowind)
    nmat->rowind = gk_icopy(mat->rowptr[mat->nrows], mat->rowind,
                            gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowind"));
  if (mat->rowval)
    nmat->rowval = gk_fcopy(mat->rowptr[mat->nrows], mat->rowval,
                            gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowval"));

  /* copy the col structure */
  if (mat->colptr)
    nmat->colptr = gk_zcopy(mat->ncols+1, mat->colptr,
                            gk_zmalloc(mat->ncols+1, "gk_csr_Dup: colptr"));
  if (mat->colids)
    nmat->colids = gk_icopy(mat->ncols, mat->colids,
                            gk_imalloc(mat->ncols, "gk_csr_Dup: colids"));
  if (mat->cnorms)
    nmat->cnorms = gk_fcopy(mat->ncols, mat->cnorms,
                            gk_fmalloc(mat->ncols, "gk_csr_Dup: cnorms"));
  if (mat->colind)
    nmat->colind = gk_icopy(mat->colptr[mat->ncols], mat->colind,
                            gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colind"));
  if (mat->colval)
    nmat->colval = gk_fcopy(mat->colptr[mat->ncols], mat->colval,
                            gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colval"));

  return nmat;
}


/*************************************************************************/
/*! Returns a submatrix containing a set of consecutive rows.
    \param mat is the original matrix.
    \param rstart is the starting row.
    \param nrows is the number of rows from rstart to extract.
    \returns the row structure of the newly created submatrix, or NULL
       if the requested range extends past the last row.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractSubmatrix(gk_csr_t *mat, int rstart, int nrows)
{
  ssize_t i;
  gk_csr_t *nmat;

  if (rstart+nrows > mat->nrows)
    return NULL;

  nmat = gk_csr_Create();

  nmat->nrows = nrows;
  nmat->ncols = mat->ncols;

  /* copy the row structure */
  if (mat->rowptr)
    nmat->rowptr = gk_zcopy(nrows+1, mat->rowptr+rstart,
                            gk_zmalloc(nrows+1, "gk_csr_ExtractSubmatrix: rowptr"));
  /* rebase the copied rowptr so the submatrix starts at offset 0; iterate
     downwards so rowptr[0] is subtracted from itself last */
  for (i=nrows; i>=0; i--)
    nmat->rowptr[i] -= nmat->rowptr[0];
  ASSERT(nmat->rowptr[0] == 0);

  if (mat->rowids)
    nmat->rowids = gk_icopy(nrows, mat->rowids+rstart,
                            gk_imalloc(nrows, "gk_csr_ExtractSubmatrix: rowids"));
  if (mat->rnorms)
    nmat->rnorms = gk_fcopy(nrows, mat->rnorms+rstart,
                            gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rnorms"));

  if (mat->rsums)
    nmat->rsums = gk_fcopy(nrows, mat->rsums+rstart,
                           gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rsums"));

  ASSERT(nmat->rowptr[nrows] == mat->rowptr[rstart+nrows]-mat->rowptr[rstart]);

  /* copy the rstart..rstart+nrows slice of the index/value arrays */
  if (mat->rowind)
    nmat->rowind = gk_icopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                            mat->rowind+mat->rowptr[rstart],
                            gk_imalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                                       "gk_csr_ExtractSubmatrix: rowind"));
  if (mat->rowval)
    nmat->rowval = gk_fcopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                            mat->rowval+mat->rowptr[rstart],
                            gk_fmalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                                       "gk_csr_ExtractSubmatrix: rowval"));

  return nmat;
}


/*************************************************************************/
/*!
Returns a submatrix containing a certain set of rows. \param mat is the original matrix. \param nrows is the number of rows to extract. \param rind is the set of row numbers to extract. \returns the row structure of the newly created submatrix. */ /**************************************************************************/ gk_csr_t *gk_csr_ExtractRows(gk_csr_t *mat, int nrows, int *rind) { ssize_t i, ii, j, nnz; gk_csr_t *nmat; nmat = gk_csr_Create(); nmat->nrows = nrows; nmat->ncols = mat->ncols; for (nnz=0, i=0; i<nrows; i++) nnz += mat->rowptr[rind[i]+1]-mat->rowptr[rind[i]]; nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr"); nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind"); nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval"); nmat->rowptr[0] = 0; for (nnz=0, j=0, ii=0; ii<nrows; ii++) { i = rind[ii]; gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz); gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz); nnz += mat->rowptr[i+1]-mat->rowptr[i]; nmat->rowptr[++j] = nnz; } ASSERT(j == nmat->nrows); return nmat; } /*************************************************************************/ /*! Returns a submatrix corresponding to a specified partitioning of rows. \param mat is the original matrix. \param part is the partitioning vector of the rows. \param pid is the partition ID that will be extracted. \returns the row structure of the newly created submatrix. 
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractPartition(gk_csr_t *mat, int *part, int pid)
{
  ssize_t i, j, nnz;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = 0;
  nmat->ncols = mat->ncols;

  /* first pass: count the rows and nonzeros belonging to partition pid */
  for (nnz=0, i=0; i<mat->nrows; i++) {
    if (part[i] == pid) {
      nmat->nrows++;
      nnz += mat->rowptr[i+1]-mat->rowptr[i];
    }
  }

  nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr");
  nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind");
  nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval");

  /* second pass: copy the rows of partition pid, preserving their
     relative order */
  nmat->rowptr[0] = 0;
  for (nnz=0, j=0, i=0; i<mat->nrows; i++) {
    if (part[i] == pid) {
      gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
      gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
      nnz += mat->rowptr[i+1]-mat->rowptr[i];
      nmat->rowptr[++j] = nnz;
    }
  }
  ASSERT(j == nmat->nrows);

  return nmat;
}


/*************************************************************************/
/*! Splits the matrix into multiple sub-matrices based on the provided
    color array.
    \param mat is the original matrix.
    \param color is an array of size equal to the number of non-zeros
           in the matrix (row-wise structure). The matrix is split into
           as many parts as the number of colors. For meaningful results,
           the colors should be numbered consecutively starting from 0.
    \returns an array of matrices for each supplied color number.
*/
/**************************************************************************/
gk_csr_t **gk_csr_Split(gk_csr_t *mat, int *color)
{
  ssize_t i, j;
  int nrows, ncolors;
  ssize_t *rowptr;
  int *rowind;
  float *rowval;
  gk_csr_t **smats;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  /* number of output matrices = largest color id + 1 */
  ncolors = gk_imax(rowptr[nrows], color)+1;

  smats = (gk_csr_t **)gk_malloc(sizeof(gk_csr_t *)*ncolors, "gk_csr_Split: smats");
  for (i=0; i<ncolors; i++) {
    smats[i] = gk_csr_Create();
    smats[i]->nrows  = mat->nrows;
    smats[i]->ncols  = mat->ncols;
    smats[i]->rowptr = gk_zsmalloc(nrows+1, 0, "gk_csr_Split: smats[i]->rowptr");
  }

  /* histogram the nonzeros of every row by color */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++)
      smats[color[j]]->rowptr[i]++;
  }
  /* turn the per-color row counts into proper CSR rowptr arrays */
  for (i=0; i<ncolors; i++)
    MAKECSR(j, nrows, smats[i]->rowptr);

  for (i=0; i<ncolors; i++) {
    smats[i]->rowind = gk_imalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowind");
    smats[i]->rowval = gk_fmalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowval");
  }

  /* scatter every nonzero into the matrix of its color; each rowptr is
     used as a running insertion cursor and is repaired by SHIFTCSR below */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      smats[color[j]]->rowind[smats[color[j]]->rowptr[i]] = rowind[j];
      smats[color[j]]->rowval[smats[color[j]]->rowptr[i]] = rowval[j];
      smats[color[j]]->rowptr[i]++;
    }
  }
  for (i=0; i<ncolors; i++)
    SHIFTCSR(j, nrows, smats[i]->rowptr);

  return smats;
}


/**************************************************************************/
/*! Reads a CSR matrix from the supplied file and stores it the matrix's 
    forward structure.
    \param filename is the file that stores the data.
    \param format is either GK_CSR_FMT_METIS, GK_CSR_FMT_CLUTO, 
           GK_CSR_FMT_CSR, GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL 
           specifying the type of the input format. 
           The GK_CSR_FMT_CSR does not contain a header
           line, whereas the GK_CSR_FMT_BINROW is a binary format written 
           by gk_csr_Write() using the same format specifier.
    \param readvals is either 1 or 0, indicating if the CSR file contains
           values or it does not. It only applies when GK_CSR_FMT_CSR is
           used.
    \param numbering is either 1 or 0, indicating if the numbering of the 
           indices start from 1 or 0, respectively. If they start from 1, 
           they are automatically decremented during input so that they 
           will start from 0. It only applies when GK_CSR_FMT_CSR is
           used.
    \returns the matrix that was read.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Read(char *filename, int format, int readvals, int numbering)
{
  ssize_t i, k, l;
  size_t nfields, nrows, ncols, nnz, fmt, ncon;
  size_t lnlen;
  ssize_t *rowptr;
  int *rowind, ival;
  float *rowval=NULL, fval;
  int readsizes, readwgts;
  char *line=NULL, *head, *tail, fmtstr[256];
  FILE *fpin;
  gk_csr_t *mat=NULL;

  if (!gk_fexists(filename)) 
    gk_errexit(SIGERR, "File %s does not exist!\n", filename);

  /* binary row-wise dump: nrows/ncols followed by rowptr/rowind[/rowval],
     read verbatim with no further processing */
  if (format == GK_CSR_FMT_BINROW) {
    mat = gk_csr_Create();

    fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
    if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
      gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
    if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
      gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
    mat->rowptr = gk_zmalloc(mat->nrows+1, "gk_csr_Read: rowptr");
    if (fread(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpin) != mat->nrows+1)
      gk_errexit(SIGERR, "Failed to read the rowptr from file %s!\n", filename);
    mat->rowind = gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowind");
    if (fread(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
      gk_errexit(SIGERR, "Failed to read the rowind from file %s!\n", filename);
    if (readvals == 1) {
      mat->rowval = gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowval");
      if (fread(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
        gk_errexit(SIGERR, "Failed to read the rowval from file %s!\n", filename);
    }

    gk_fclose(fpin);

    return mat;
  }

  /* binary column-wise dump: same layout as above, but populates the
     column-based (reverse) structure instead */
  if (format == GK_CSR_FMT_BINCOL) {
    mat = gk_csr_Create();

    fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
    if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
      gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
    if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
      gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
    mat->colptr = gk_zmalloc(mat->ncols+1, "gk_csr_Read: colptr");
    if (fread(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpin) != mat->ncols+1)
      gk_errexit(SIGERR, "Failed to read the colptr from file %s!\n", filename);
    mat->colind = gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Read: colind");
    if (fread(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
      gk_errexit(SIGERR, "Failed to read the colind from file %s!\n", filename);
    if (readvals) {
      mat->colval = gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Read: colval");
      if (fread(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
        gk_errexit(SIGERR, "Failed to read the colval from file %s!\n", filename);
    }

    gk_fclose(fpin);

    return mat;
  }

  /* CLUTO text format: "nrows ncols nnz" header, 1-based, always has values */
  if (format == GK_CSR_FMT_CLUTO) {
    fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
    do {
      if (gk_getline(&line, &lnlen, fpin) <= 0)
        gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
    } while (line[0] == '%');

    if (sscanf(line, "%zu %zu %zu", &nrows, &ncols, &nnz) != 3)
      gk_errexit(SIGERR, "Header line must contain 3 integers.\n");

    readsizes = 0;
    readwgts  = 0;
    readvals  = 1;
    numbering = 1;
  }
  /* METIS graph format: "nvtxs nedges [fmt [ncon]]" header; the 3-digit fmt
     string encodes the presence of sizes/weights/values */
  else if (format == GK_CSR_FMT_METIS) {
    fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
    do {
      if (gk_getline(&line, &lnlen, fpin) <= 0)
        gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
    } while (line[0] == '%');

    fmt = ncon = 0;
    nfields = sscanf(line, "%zu %zu %zu %zu", &nrows, &nnz, &fmt, &ncon);
    if (nfields < 2)
      gk_errexit(SIGERR, "Header line must contain at least 2 integers (#vtxs and #edges).\n");

    ncols = nrows;
    nnz *= 2;  /* each undirected edge is stored in both endpoints' rows */

    if (fmt > 111)
      gk_errexit(SIGERR, "Cannot read this type of file format [fmt=%zu]!\n", fmt);

    sprintf(fmtstr, "%03zu", fmt%1000);
    readsizes = (fmtstr[0] == '1');
    readwgts  = (fmtstr[1] == '1');
    readvals  = (fmtstr[2] == '1');
    numbering = 1;
    ncon      = (ncon == 0 ? 1 : ncon);
  }
  /* headerless CSR format: dimensions are inferred from file statistics */
  else {
    readsizes = 0;
    readwgts  = 0;

    gk_getfilestats(filename, &nrows, &nnz, NULL, NULL);

    if (readvals == 1 && nnz%2 == 1)
      gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not even.\n", nnz, readvals);
    if (readvals == 1)
      nnz = nnz/2;  /* numbers come in (index, value) pairs */
    fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
  }

  mat = gk_csr_Create();

  mat->nrows = nrows;

  rowptr = mat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Read: rowptr");
  rowind = mat->rowind = gk_imalloc(nnz, "gk_csr_Read: rowind");
  if (readvals != 2)
    rowval = mat->rowval = gk_fsmalloc(nnz, 1.0, "gk_csr_Read: rowval");

  if (readsizes)
    mat->rsizes = gk_fsmalloc(nrows, 0.0, "gk_csr_Read: rsizes");

  if (readwgts)
    mat->rwgts = gk_fsmalloc(nrows*ncon, 0.0, "gk_csr_Read: rwgts");

  /*----------------------------------------------------------------------
   * Read the sparse matrix file
   *---------------------------------------------------------------------*/
  /* numbering becomes the offset added to each read index (-1 or 0) */
  numbering = (numbering ? - 1 : 0);
  for (ncols=0, rowptr[0]=0, k=0, i=0; i<nrows; i++) {
    do {
      if (gk_getline(&line, &lnlen, fpin) == -1)
        gk_errexit(SIGERR, "Premature end of input file: file while reading row %d\n", i);
    } while (line[0] == '%');

    head = line;
    tail = NULL;

    /* Read vertex sizes */
    if (readsizes) {
#ifdef __MSC__
      mat->rsizes[i] = (float)strtod(head, &tail);
#else
      mat->rsizes[i] = strtof(head, &tail);
#endif
      if (tail == head)
        gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1);
      if (mat->rsizes[i] < 0)
        errexit("The size for vertex %zd must be >= 0\n", i+1);
      head = tail;
    }

    /* Read vertex weights */
    if (readwgts) {
      for (l=0; l<ncon; l++) {
#ifdef __MSC__
        mat->rwgts[i*ncon+l] = (float)strtod(head, &tail);
#else
        mat->rwgts[i*ncon+l] = strtof(head, &tail);
#endif
        if (tail == head)
          errexit("The line for vertex %zd does not have enough weights "
                  "for the %d constraints.\n", i+1, ncon);
        if (mat->rwgts[i*ncon+l] < 0)
          errexit("The weight vertex %zd and constraint %zd must be >= 0\n", i+1, l);
        head = tail;
      }
    }

    /* Read the rest of the row */
    while (1) {
      ival = (int)strtol(head, &tail, 0);
      if (tail == head)
        break;  /* no more indices on this line */
      head = tail;

      if ((rowind[k] = ival + numbering) < 0)
        gk_errexit(SIGERR, "Error: Invalid column number %d at row %zd.\n", ival, i);

      /* track the largest column index seen to determine ncols */
      ncols = gk_max(rowind[k], ncols);

      if (readvals == 1) {
#ifdef __MSC__
        fval = (float)strtod(head, &tail);
#else
        fval = strtof(head, &tail);
#endif
        if (tail == head)
          gk_errexit(SIGERR, "Value could not be found for column! Row:%zd, NNZ:%zd\n", i, k);
        head = tail;

        rowval[k] = fval;
      }
      k++;
    }
    rowptr[i+1] = k;
  }

  if (format == GK_CSR_FMT_METIS) {
    ASSERT(ncols+1 == mat->nrows);
    mat->ncols = mat->nrows;  /* graphs are square */
  }
  else {
    mat->ncols = ncols+1;
  }

  if (k != nnz)
    gk_errexit(SIGERR, "gk_csr_Read: Something wrong with the number of nonzeros in "
                       "the input file. NNZ=%zd, ActualNNZ=%zd.\n", nnz, k);

  gk_fclose(fpin);

  gk_free((void **)&line, LTERM);

  return mat;
}


/**************************************************************************/
/*! 
Writes the row-based structure of a matrix into a file.
    \param mat is the matrix to be written,
    \param filename is the name of the output file.
    \param format is one of: GK_CSR_FMT_CLUTO, GK_CSR_FMT_CSR, 
           GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL.
    \param writevals is either 1 or 0 indicating if the values will be 
           written or not. This is only applicable when GK_CSR_FMT_CSR
           is used.
    \param numbering is either 1 or 0 indicating if the internal 0-based 
           numbering will be shifted by one or not during output. This 
           is only applicable when GK_CSR_FMT_CSR is used.
*/
/**************************************************************************/
void gk_csr_Write(gk_csr_t *mat, char *filename, int format, int writevals, int numbering)
{
  ssize_t i, j;
  FILE *fpout;

  /* binary row-wise dump: nrows/ncols followed by rowptr/rowind[/rowval] */
  if (format == GK_CSR_FMT_BINROW) {
    if (filename == NULL)
      gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
    fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");

    fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout); 
    fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout); 
    fwrite(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpout); 
    fwrite(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpout); 
    if (writevals)
      fwrite(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpout); 

    gk_fclose(fpout);
    return;
  }

  /* binary column-wise dump of the reverse (column-based) structure */
  if (format == GK_CSR_FMT_BINCOL) {
    if (filename == NULL)
      gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
    fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");

    fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout); 
    fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout); 
    fwrite(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpout); 
    fwrite(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpout); 
    if (writevals)
      fwrite(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpout); 

    gk_fclose(fpout);
    return;
  }

  /* text formats; fall back to stdout when no filename is given */
  if (filename)
    fpout = gk_fopen(filename, "w", "gk_csr_Write: fpout");
  else
    fpout = stdout; 

  /* CLUTO adds a header line and is always 1-based with values */
  if (format == GK_CSR_FMT_CLUTO) {
    fprintf(fpout, "%d %d %zd\n", mat->nrows, mat->ncols, mat->rowptr[mat->nrows]);
    writevals = 1;
    numbering = 1;
  }

  for (i=0; i<mat->nrows; i++) {
    for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++) {
      fprintf(fpout, " %d", mat->rowind[j]+(numbering ? 1 : 0));
      if (writevals) 
        fprintf(fpout, " %f", mat->rowval[j]);
    }
    fprintf(fpout, "\n");
  }
  if (filename)
    gk_fclose(fpout);
}


/*************************************************************************/
/*! Prunes certain rows/columns of the matrix. The pruning takes place 
    by analyzing the row structure of the matrix. The pruning takes place
    by removing rows/columns but it does not affect the numbering of the
    remaining rows/columns.
   
    \param mat the matrix to be pruned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
           of the matrix will be pruned,
    \param minf is the minimum number of rows (columns) that a column (row) must
           be present in order to be kept,
    \param maxf is the maximum number of rows (columns) that a column (row) must
          be present at in order to be kept.
    \returns the pruned matrix consisting only of its row-based structure. 
          The input matrix is not modified. 
*/
/**************************************************************************/
gk_csr_t *gk_csr_Prune(gk_csr_t *mat, int what, int minf, int maxf)
{
  ssize_t i, j, nnz;
  int nrows, ncols;
  ssize_t *rowptr, *nrowptr;
  int *rowind, *nrowind, *collen;
  float *rowval, *nrowval;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  /* output arrays are sized for the worst case (nothing pruned) */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Prune: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_Prune: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_Prune: nrowval");

  switch (what) {
    case GK_CSR_COL:
      /* count how many rows each column appears in */
      collen = gk_ismalloc(ncols, 0, "gk_csr_Prune: collen");

      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          ASSERT(rowind[j] < ncols);
          collen[rowind[j]]++;
        }
      }
      /* turn the counts into a keep (1) / prune (0) marker per column */
      for (i=0; i<ncols; i++)
        collen[i] = (collen[i] >= minf && collen[i] <= maxf ? 1 : 0);

      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (collen[rowind[j]]) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
            nnz++;
          }
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&collen, LTERM);
      break;

    case GK_CSR_ROW:
      /* keep only rows whose length falls within [minf, maxf]; pruned
         rows become empty but retain their row number */
      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        if (rowptr[i+1]-rowptr[i] >= minf && rowptr[i+1]-rowptr[i] <= maxf) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++, nnz++) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
          }
        }
        nrowptr[i+1] = nnz;
      }
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}


/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The 
    filtering takes place by keeping only the highest weight entries whose
    sum accounts for a certain fraction of the overall weight of the 
    row/column.
    \param mat the matrix to be pruned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
           of the matrix will be pruned,
    \param norm indicates the norm that will be used to aggregate the weights
           and possible values are 1 or 2,
    \param fraction is the fraction of the overall norm that will be retained
           by the kept entries.
    \returns the filtered matrix consisting only of its row-based structure. 
           The input matrix is not modified. 
*/
/**************************************************************************/
gk_csr_t *gk_csr_LowFilter(gk_csr_t *mat, int what, int norm, float fraction)
{
  ssize_t i, j, nnz;
  int nrows, ncols, ncand, maxlen=0;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind;
  float *rowval, *colval, *nrowval, rsum, tsum;
  gk_csr_t *nmat;
  gk_fkv_t *cand;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* output arrays are sized for the worst case (nothing filtered) */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      if (mat->colptr == NULL) 
        gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");

      gk_zcopy(nrows+1, rowptr, nrowptr);

      /* size the per-thread candidate buffer to the longest column */
      for (i=0; i<ncols; i++) 
        maxlen = gk_max(maxlen, colptr[i+1]-colptr[i]);

      #pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
      {
        cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");

        #pragma omp for schedule(static)
        for (i=0; i<ncols; i++) {
          /* gather column i and its total (1- or 2-norm) weight */
          for (tsum=0.0, ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
            cand[ncand].val = colind[j];
            cand[ncand].key = colval[j];
            tsum += (norm == 1 ? colval[j] : colval[j]*colval[j]);
          }
          gk_fkvsortd(ncand, cand);

          /* keep heaviest entries until they cover fraction*tsum; the
             kept entries are scattered back row-wise via nrowptr cursors */
          for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
            rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
            nrowind[nrowptr[cand[j].val]] = i;
            nrowval[nrowptr[cand[j].val]] = cand[j].key;
            nrowptr[cand[j].val]++;
          }
        }

        gk_free((void **)&cand, LTERM);
      }

      /* compact the nrowind/nrowval */
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i] = nnz;
      }
      SHIFTCSR(i, nrows, nrowptr);

      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL) 
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      /* size the per-thread candidate buffer to the longest row */
      for (i=0; i<nrows; i++) 
        maxlen = gk_max(maxlen, rowptr[i+1]-rowptr[i]);

      #pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
      {
        cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");

        #pragma omp for schedule(static)
        for (i=0; i<nrows; i++) {
          for (tsum=0.0, ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
            cand[ncand].val = rowind[j];
            cand[ncand].key = rowval[j];
            tsum += (norm == 1 ? rowval[j] : rowval[j]*rowval[j]);
          }
          gk_fkvsortd(ncand, cand);

          /* keep heaviest entries until they cover fraction*tsum; each
             row writes into its own (old) rowptr segment */
          for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
            rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
            nrowind[rowptr[i]+j] = cand[j].val;
            nrowval[rowptr[i]+j] = cand[j].key;
          }
          nrowptr[i+1] = rowptr[i]+j;
        }

        gk_free((void **)&cand, LTERM);
      }

      /* compact nrowind/nrowval */
      nrowptr[0] = nnz = 0;
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i+1]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i+1] = nnz;
      }

      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}


/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The 
    filtering takes place by keeping only the highest weight top-K entries 
    along each row/column and those entries whose weight is greater than
    a specified value.
    \param mat the matrix to be pruned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
           of the matrix will be pruned,
    \param topk is the number of the highest weight entries to keep.
    \param keepval is the weight of a term above which will be kept. This
           is used to select additional terms past the first topk.
    \returns the filtered matrix consisting only of its row-based structure. 
           The input matrix is not modified. 
*/
/**************************************************************************/
gk_csr_t *gk_csr_TopKPlusFilter(gk_csr_t *mat, int what, int topk, float keepval)
{
  ssize_t i, j, k, nnz;
  int nrows, ncols, ncand;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind;
  float *rowval, *colval, *nrowval;
  gk_csr_t *nmat;
  gk_fkv_t *cand;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* output arrays are sized for the worst case (nothing filtered) */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      if (mat->colptr == NULL) 
        gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");

      cand = gk_fkvmalloc(nrows, "gk_csr_LowFilter: cand");

      gk_zcopy(nrows+1, rowptr, nrowptr);
      for (i=0; i<ncols; i++) {
        /* gather and sort column i by decreasing weight */
        for (ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
          cand[ncand].val = colind[j];
          cand[ncand].key = colval[j];
        }
        gk_fkvsortd(ncand, cand);

        /* keep the top-k entries unconditionally ... */
        k = gk_min(topk, ncand);
        for (j=0; j<k; j++) {
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
        /* ... plus any further entries whose weight is >= keepval */
        for (; j<ncand; j++) {
          if (cand[j].key < keepval) 
            break;
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
      }

      /* compact the nrowind/nrowval */
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i] = nnz;
      }
      SHIFTCSR(i, nrows, nrowptr);

      gk_free((void **)&cand, LTERM);

      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL) 
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      cand = gk_fkvmalloc(ncols, "gk_csr_LowFilter: cand");

      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        /* gather and sort row i by decreasing weight */
        for (ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
          cand[ncand].val = rowind[j];
          cand[ncand].key = rowval[j];
        }
        gk_fkvsortd(ncand, cand);

        /* keep top-k entries, then those with weight >= keepval */
        k = gk_min(topk, ncand);
        for (j=0; j<k; j++, nnz++) {
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        for (; j<ncand; j++, nnz++) {
          if (cand[j].key < keepval) 
            break;
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&cand, LTERM);

      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}


/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The 
    filtering takes place by keeping only the terms whose contribution to
    the total length of the document is greater than a user-supplied
    multiple over the average.

    This routine assumes that the vectors are normalized to be unit length.

    \param mat the matrix to be pruned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
           of the matrix will be pruned,
    \param zscore is the multiplicative factor over the average contribution 
           to the length of the document.
    \returns the filtered matrix consisting only of its row-based structure. 
           The input matrix is not modified. 
*/
/**************************************************************************/
gk_csr_t *gk_csr_ZScoreFilter(gk_csr_t *mat, int what, float zscore)
{
  ssize_t i, j, nnz;
  int nrows;
  ssize_t *rowptr, *nrowptr;
  int *rowind, *nrowind;
  float *rowval, *nrowval, avgwgt;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = mat->nrows;
  nmat->ncols = mat->ncols;

  nrows  = mat->nrows; 
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  /* output arrays are sized for the worst case (nothing filtered) */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_ZScoreFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_ZScoreFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_ZScoreFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      gk_errexit(SIGERR, "This has not been implemented yet.\n");
      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL) 
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        /* threshold = zscore * average per-entry contribution; rows are
           assumed unit-length, so the average is 1/rowlength */
        avgwgt = zscore/(rowptr[i+1]-rowptr[i]);
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] > avgwgt) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
            nnz++;
          }
        }
        nrowptr[i+1] = nnz;
      }
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}


/*************************************************************************/
/*! Compacts the column-space of the matrix by removing empty columns.
    As a result of the compaction, the column numbers are renumbered. 
    The compaction operation is done in place and only affects the row-based
    representation of the matrix. The new columns are ordered in decreasing
    frequency.
    
    \param mat the matrix whose empty columns will be removed.
*/
/**************************************************************************/
void gk_csr_CompactColumns(gk_csr_t *mat)
{
  ssize_t i;
  int nrows, ncols, nncols;
  ssize_t *rowptr;
  int *rowind, *colmap;
  gk_ikv_t *clens;

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  rowptr = mat->rowptr;
  rowind = mat->rowind;

  colmap = gk_imalloc(ncols, "gk_csr_CompactColumns: colmap");

  /* count the number of nonzeros of each column (key) while keeping
     the original column id (val) */
  clens = gk_ikvmalloc(ncols, "gk_csr_CompactColumns: clens");
  for (i=0; i<ncols; i++) {
    clens[i].key = 0;
    clens[i].val = i;
  }

  for (i=0; i<rowptr[nrows]; i++) 
    clens[rowind[i]].key++;
  /* sort by decreasing frequency so the new ids are frequency-ordered */
  gk_ikvsortd(ncols, clens);

  /* assign new consecutive ids to the non-empty columns; the first empty
     column ends the loop since all following ones are empty as well */
  for (nncols=0, i=0; i<ncols; i++) {
    if (clens[i].key > 0) 
      colmap[clens[i].val] = nncols++;
    else
      break;
  }

  /* renumber the column indices in place */
  for (i=0; i<rowptr[nrows]; i++) 
    rowind[i] = colmap[rowind[i]];

  mat->ncols = nncols;

  gk_free((void **)&colmap, &clens, LTERM);
}


/*************************************************************************/
/*! Sorts the indices in increasing order
    \param mat the matrix itself,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating which set of
           indices to sort.
*/
/**************************************************************************/
void gk_csr_SortIndices(gk_csr_t *mat, int what)
{
  int n, nn=0;
  ssize_t *ptr;
  int *ind;
  float *val;

  /* select the forward (row) or reverse (column) structure to sort */
  switch (what) {
    case GK_CSR_ROW:
      if (!mat->rowptr)
        gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");

      n   = mat->nrows;
      ptr = mat->rowptr;
      ind = mat->rowind;
      val = mat->rowval;
      break;

    case GK_CSR_COL:
      if (!mat->colptr)
        gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");

      n   = mat->ncols;
      ptr = mat->colptr;
      ind = mat->colind;
      val = mat->colval;
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return;
  }

  #pragma omp parallel if (n > 100)
  {
    ssize_t i, j, k;
    gk_ikv_t *cand;
    float *tval;

    /* nn = length of the longest row/column; per-thread buffers are
       sized to it */
    #pragma omp single
    for (i=0; i<n; i++) 
      nn = gk_max(nn, ptr[i+1]-ptr[i]);
  
    cand = gk_ikvmalloc(nn, "gk_csr_SortIndices: cand");
    tval = gk_fmalloc(nn, "gk_csr_SortIndices: tval");
  
    #pragma omp for schedule(static)
    for (i=0; i<n; i++) {
      /* k flags whether any inversion was seen, so already-sorted
         rows/columns are left untouched */
      for (k=0, j=ptr[i]; j<ptr[i+1]; j++) {
        if (j > ptr[i] && ind[j] < ind[j-1])
          k = 1; /* an inversion */
        cand[j-ptr[i]].val = j-ptr[i];
        cand[j-ptr[i]].key = ind[j];
        tval[j-ptr[i]]     = val[j];
      }
      if (k) {
        gk_ikvsorti(ptr[i+1]-ptr[i], cand);
        for (j=ptr[i]; j<ptr[i+1]; j++) {
          ind[j] = cand[j-ptr[i]].key;
          val[j] = tval[cand[j-ptr[i]].val];
        }
      }
    }

    gk_free((void **)&cand, &tval, LTERM);
  }

}


/*************************************************************************/
/*! Creates a row/column index from the column/row data.
    \param mat the matrix itself,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating which index
           will be created.
*/
/**************************************************************************/
void gk_csr_CreateIndex(gk_csr_t *mat, int what)
{
  /* 'f' stands for forward, 'r' stands for reverse */
  ssize_t i, j, k, nf, nr;
  ssize_t *fptr, *rptr;
  int *find, *rind;
  float *fval, *rval;

  switch (what) {
    case GK_CSR_COL:
      /* build the column index from the row (forward) data */
      nf   = mat->nrows;
      fptr = mat->rowptr;
      find = mat->rowind;
      fval = mat->rowval;

      /* discard any previously built column structure */
      if (mat->colptr) gk_free((void **)&mat->colptr, LTERM);
      if (mat->colind) gk_free((void **)&mat->colind, LTERM);
      if (mat->colval) gk_free((void **)&mat->colval, LTERM);

      nr   = mat->ncols;
      rptr = mat->colptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
      rind = mat->colind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
      rval = mat->colval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
      break;
    case GK_CSR_ROW:
      /* build the row index from the column (forward) data */
      nf   = mat->ncols;
      fptr = mat->colptr;
      find = mat->colind;
      fval = mat->colval;

      /* discard any previously built row structure */
      if (mat->rowptr) gk_free((void **)&mat->rowptr, LTERM);
      if (mat->rowind) gk_free((void **)&mat->rowind, LTERM);
      if (mat->rowval) gk_free((void **)&mat->rowval, LTERM);

      nr   = mat->nrows;
      rptr = mat->rowptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
      rind = mat->rowind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
      rval = mat->rowval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
      break;
    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return;
  }

  /* histogram the forward indices to size each reverse row */
  for (i=0; i<nf; i++) {
    for (j=fptr[i]; j<fptr[i+1]; j++)
      rptr[find[j]]++;
  }
  MAKECSR(i, nr, rptr);
  
  /* rptr is used as a running insertion cursor during the scatters and is
     repaired by SHIFTCSR afterwards; for matrices with more than 6 nonzeros
     per reverse row the ind and val scatters are done as two separate
     passes, otherwise in a single combined pass */
  if (rptr[nr] > 6*nr) {
    for (i=0; i<nf; i++) {
      for (j=fptr[i]; j<fptr[i+1]; j++) 
        rind[rptr[find[j]]++] = i;
    }
    SHIFTCSR(i, nr, rptr);

    if (fval) {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++) 
          rval[rptr[find[j]]++] = fval[j];
      }
      SHIFTCSR(i, nr, rptr);
    }
  }
  else {
    if (fval) {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++) {
          k = find[j];
          rind[rptr[k]]   = i;
          rval[rptr[k]++] = fval[j];
        }
      }
    }
    else {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++) 
          rind[rptr[find[j]]++] = i;
      }
    }
    SHIFTCSR(i, nr, rptr);
  }
}


/*************************************************************************/
/*! Normalizes the rows/columns of the matrix to be unit length.
    \param mat the matrix itself,
    \param what indicates what will be normalized and is obtained by
           specifying GK_CSR_ROW, GK_CSR_COL, GK_CSR_ROW|GK_CSR_COL.
    \param norm indicates what norm is to normalize to, 1: 1-norm, 2: 2-norm
*/
/**************************************************************************/
void gk_csr_Normalize(gk_csr_t *mat, int what, int norm)
{
  ssize_t i, j;
  int n;
  ssize_t *ptr;
  float *val, sum;

  /* normalize the rows, if requested and values are present */
  if (what&GK_CSR_ROW && mat->rowval) {
    n   = mat->nrows;
    ptr = mat->rowptr;
    val = mat->rowval;

    #pragma omp parallel if (ptr[n] > OMPMINOPS)
    {
      #pragma omp for private(j,sum) schedule(static)
      for (i=0; i<n; i++) {
        for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++){
          if (norm == 2)
            sum += val[j]*val[j];
          else if (norm == 1)
            sum += val[j]; /* assume val[j] > 0 */ 
        }
        if (sum > 0) {
          /* sum becomes the scaling factor 1/norm */
          if (norm == 2)
            sum=1.0/sqrt(sum); 
          else if (norm == 1)
            sum=1.0/sum; 
          for (j=ptr[i]; j<ptr[i+1]; j++)
            val[j] *= sum;
        }
      }
    }
  }

  /* normalize the columns, if requested and values are present */
  if (what&GK_CSR_COL && mat->colval) {
    n   = mat->ncols;
    ptr = mat->colptr;
    val = mat->colval;

    #pragma omp parallel if (ptr[n] > OMPMINOPS)
    {
      #pragma omp for private(j,sum) schedule(static)
      for (i=0; i<n; i++) {
        for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++)
          if (norm == 2)
            sum += val[j]*val[j];
          else if (norm == 1)
            sum += val[j];
        if (sum > 0) {
          /* sum becomes the scaling factor 1/norm */
          if (norm == 2)
            sum=1.0/sqrt(sum); 
          else if (norm == 1)
            sum=1.0/sum; 
          for (j=ptr[i]; j<ptr[i+1]; j++)
            val[j] *= sum;
        }
      }
    }
  }
}


/*************************************************************************/
/*! Applies different row scaling methods.
    \param mat the matrix itself,
    \param type indicates the type of row scaling. Possible values are:
           GK_CSR_MAXTF, GK_CSR_SQRT, GK_CSR_LOG, GK_CSR_IDF, GK_CSR_MAXTF2.
*/
/**************************************************************************/
void gk_csr_Scale(gk_csr_t *mat, int type)
{
  ssize_t i, j;
  int nrows, ncols, nnzcols, bgfreq;
  ssize_t *rowptr;
  int *rowind, *collen;
  float *rowval, *cscale, maxtf;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  switch (type) {
    case GK_CSR_MAXTF: /* TF' = .5 + .5*TF/MAX(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j, maxtf) schedule(static)
        for (i=0; i<nrows; i++) {
          /* Find the maximum absolute value in row i... */
          maxtf = fabs(rowval[rowptr[i]]);
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);

          /* ...then rescale the row relative to it. */
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] = .5 + .5*rowval[j]/maxtf;
        }
      }
      break;

    case GK_CSR_MAXTF2: /* TF' = .1 + .9*TF/MAX(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j, maxtf) schedule(static)
        for (i=0; i<nrows; i++) {
          maxtf = fabs(rowval[rowptr[i]]);
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);

          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] = .1 + .9*rowval[j]/maxtf;
        }
      }
      break;

    case GK_CSR_SQRT: /* TF' = .1+SQRT(TF) */
      /* sign(a,b) presumably applies the sign of a to b, preserving the
         sign of negative entries -- TODO confirm against gk_macros. */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], sqrt(fabs(rowval[j])));
          }
        }
      }
      break;

    case GK_CSR_POW25: /* TF' = .1+POW(TF,.25) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], sqrt(sqrt(fabs(rowval[j]))));
          }
        }
      }
      break;

    case GK_CSR_POW65: /* TF' = .1+POW(TF,.65) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .65));
          }
        }
      }
      break;

    case GK_CSR_POW75: /* TF' = .1+POW(TF,.75) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .75));
          }
        }
      }
      break;

    case GK_CSR_POW85: /* TF' = .1+POW(TF,.85) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .85));
          }
        }
      }
      break;

    case GK_CSR_LOG: /* TF' = 1+log_2(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        double logscale = 1.0/log(2.0);  /* convert natural log to log_2 */
        /* Flat iteration over all nonzeros; row structure is irrelevant. */
        #pragma omp for schedule(static,32)
        for (i=0; i<rowptr[nrows]; i++) {
          if (rowval[i] != 0.0)
            rowval[i] = 1+(rowval[i]>0.0 ? log(rowval[i]) : -log(-rowval[i]))*logscale;
        }
#ifdef XXX
        /* Older row-by-row variant, kept for reference. */
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = 1+(rowval[j]>0.0 ? log(rowval[j]) : -log(-rowval[j]))*logscale;
            //rowval[j] = 1+sign(rowval[j], log(fabs(rowval[j]))*logscale);
          }
        }
#endif
      }
      break;

    case GK_CSR_IDF: /* TF' = TF*IDF */
      ncols  = mat->ncols;
      cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
      collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");

      /* Count, serially, the number of rows each column appears in. */
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          collen[rowind[j]]++;
      }

      /* Per-column IDF factor; empty columns get a scale of 0. */
      #pragma omp parallel if (ncols > OMPMINOPS)
      {
        #pragma omp for schedule(static)
        for (i=0; i<ncols; i++)
          cscale[i] = (collen[i] > 0 ? log(1.0*nrows/collen[i]) : 0.0);
      }

      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] *= cscale[rowind[j]];
        }
      }

      gk_free((void **)&cscale, &collen, LTERM);
      break;

    case GK_CSR_IDF2: /* TF' = TF*IDF */
      ncols  = mat->ncols;
      cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
      collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");

      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          collen[rowind[j]]++;
      }

      nnzcols = 0;
      #pragma omp parallel if (ncols > OMPMINOPS)
      {
        #pragma omp for schedule(static) reduction(+:nnzcols)
        for (i=0; i<ncols; i++)
          nnzcols += (collen[i] > 0 ? 1 : 0);

        /* NOTE(review): the following two statements are inside the
           parallel region but outside any worksharing construct, so EVERY
           thread recomputes bgfreq and emits the printf.  The result is
           identical per thread (benign), but the diagnostic prints once
           per thread; an "omp single" would be cleaner -- confirm intent. */
        bgfreq = gk_max(10, (ssize_t)(.5*rowptr[nrows]/nnzcols));
        printf("nnz: %zd, nnzcols: %d, bgfreq: %d\n", rowptr[nrows], nnzcols, bgfreq);

        /* Background-frequency-smoothed IDF. */
        #pragma omp for schedule(static)
        for (i=0; i<ncols; i++)
          cscale[i] = (collen[i] > 0 ? log(1.0*(nrows+2*bgfreq)/(bgfreq+collen[i])) : 0.0);
      }

      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] *= cscale[rowind[j]];
        }
      }

      gk_free((void **)&cscale, &collen, LTERM);
      break;

    default:
      gk_errexit(SIGERR, "Unknown scaling type of %d\n", type);
  }
}


/*************************************************************************/
/*! Computes the sums of the rows/columns
    \param mat the matrix itself,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating which
           sums to compute.
*/
/**************************************************************************/
void gk_csr_ComputeSums(gk_csr_t *mat, int what)
{
  ssize_t i;
  int n;
  ssize_t *ptr;
  float *val, *sums;

  /* Select the row or column view and (re)allocate the cached sums array. */
  switch (what) {
    case GK_CSR_ROW:
      n   = mat->nrows;
      ptr = mat->rowptr;
      val = mat->rowval;

      if (mat->rsums)
        gk_free((void **)&mat->rsums, LTERM);

      sums = mat->rsums = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: sums");
      break;
    case GK_CSR_COL:
      n   = mat->ncols;
      ptr = mat->colptr;
      val = mat->colval;

      if (mat->csums)
        gk_free((void **)&mat->csums, LTERM);

      sums = mat->csums = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: sums");
      break;
    default:
      gk_errexit(SIGERR, "Invalid sum type of %d.\n", what);
      return;
  }

  #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
  for (i=0; i<n; i++)
    sums[i] = gk_fsum(ptr[i+1]-ptr[i], val+ptr[i], 1);
}


/*************************************************************************/
/*! Computes the squared of the norms of the rows/columns
    \param mat the matrix itself,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating which
           squared norms to compute.
*/
/**************************************************************************/
void gk_csr_ComputeSquaredNorms(gk_csr_t *mat, int what)
{
  ssize_t i;
  int n;
  ssize_t *ptr;
  float *val, *norms;

  /* Select the row or column view and (re)allocate the cached norms array.
     BUGFIX: the allocation diagnostic strings previously said
     "gk_csr_ComputeSums: norms" (copy-paste from gk_csr_ComputeSums),
     which would misattribute an out-of-memory failure to the wrong
     routine; they now name this function. */
  switch (what) {
    case GK_CSR_ROW:
      n   = mat->nrows;
      ptr = mat->rowptr;
      val = mat->rowval;

      if (mat->rnorms)
        gk_free((void **)&mat->rnorms, LTERM);

      norms = mat->rnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSquaredNorms: norms");
      break;
    case GK_CSR_COL:
      n   = mat->ncols;
      ptr = mat->colptr;
      val = mat->colval;

      if (mat->cnorms)
        gk_free((void **)&mat->cnorms, LTERM);

      norms = mat->cnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSquaredNorms: norms");
      break;
    default:
      gk_errexit(SIGERR, "Invalid norm type of %d.\n", what);
      return;
  }

  /* norms[i] = dot(v_i, v_i), i.e., the squared 2-norm of row/column i. */
  #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
  for (i=0; i<n; i++)
    norms[i] = gk_fdot(ptr[i+1]-ptr[i], val+ptr[i], 1, val+ptr[i], 1);
}


/*************************************************************************/
/*! Computes the similarity between two rows/columns

    \param mat the matrix itself. The routine assumes that the indices are
           sorted in increasing order.
    \param i1 is the first row/column,
    \param i2 is the second row/column,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating the type of
           objects between the similarity will be computed,
    \param simtype is the type of similarity and is one of GK_CSR_COS,
           GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
    \returns the similarity between the two rows/columns.
*/
/**************************************************************************/
float gk_csr_ComputeSimilarity(gk_csr_t *mat, int i1, int i2, int what, int simtype)
{
  int nind1, nind2;
  int *ind1, *ind2;
  float *val1, *val2, stat1, stat2, sim;

  /* Locate the two sparse vectors in the requested (row or column) view. */
  switch (what) {
    case GK_CSR_ROW:
      if (!mat->rowptr)
        gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
      nind1 = mat->rowptr[i1+1]-mat->rowptr[i1];
      nind2 = mat->rowptr[i2+1]-mat->rowptr[i2];
      ind1  = mat->rowind + mat->rowptr[i1];
      ind2  = mat->rowind + mat->rowptr[i2];
      val1  = mat->rowval + mat->rowptr[i1];
      val2  = mat->rowval + mat->rowptr[i2];
      break;

    case GK_CSR_COL:
      if (!mat->colptr)
        gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
      nind1 = mat->colptr[i1+1]-mat->colptr[i1];
      nind2 = mat->colptr[i2+1]-mat->colptr[i2];
      ind1  = mat->colind + mat->colptr[i1];
      ind2  = mat->colind + mat->colptr[i2];
      val1  = mat->colval + mat->colptr[i1];
      val2  = mat->colval + mat->colptr[i2];
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return 0.0;
  }

  /* Merge-style walk over the two sorted index lists; i1/i2 are reused as
     cursors from here on.  stat1/stat2 accumulate per-vector statistics
     (squared norms or sums) and sim accumulates the matched-index term. */
  switch (simtype) {
    case GK_CSR_COS:
    case GK_CSR_JAC:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 && i2<nind2) {
        /* NOTE(review): the next two branches are unreachable -- the loop
           condition already guarantees i1<nind1 && i2<nind2.  As written,
           the loop stops when the SHORTER list is exhausted, so trailing
           entries of the longer vector never contribute to stat1/stat2;
           the dead branches suggest the condition may have been intended
           as (i1<nind1 || i2<nind2).  Confirm against upstream GKlib
           before changing, since it alters similarity values. */
        if (i1 == nind1) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else {
          sim   += val1[i1]*val2[i2];
          stat1 += val1[i1]*val1[i1];
          stat2 += val2[i2]*val2[i2];
          i1++;
          i2++;
        }
      }
      if (simtype == GK_CSR_COS)
        sim = (stat1*stat2 > 0.0 ? sim/sqrt(stat1*stat2) : 0.0);
      else
        sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_MIN:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 && i2<nind2) {
        /* Same unreachable-branch pattern as above; see the note there. */
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_AMIN:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 && i2<nind2) {
        /* Same unreachable-branch pattern as above; see the note there. */
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      /* Asymmetric: normalized by the first vector's sum only. */
      sim = (stat1 > 0.0 ? sim/stat1 : 0.0);
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  return sim;
}


/*************************************************************************/
/*! Finds the n most similar rows (neighbors) to the query using cosine
    similarity.

    \param mat the matrix itself
    \param nqterms is the number of columns in the query
    \param qind is the list of query columns
    \param qval is the list of correspodning query weights
    \param simtype is the type of similarity and is one of GK_CSR_COS,
           GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
    \param nsim is the maximum number of requested most similar rows.
           If -1 is provided, then everything is returned unsorted.
    \param minsim is the minimum similarity of the requested most
           similar rows
    \param hits is the result set. This array should be at least
           of length nsim.
    \param i_marker is an array of size equal to the number of rows
           whose values are initialized to -1. If NULL is provided then
           this array is allocated and freed internally.
    \param i_cand is an array of size equal to the number of rows.
If NULL is provided then this array is allocated and freed internally.

    \returns the number of identified most similar rows, which can be
             smaller than the requested number of nnbrs in those cases
             in which there are no sufficiently many neighbors.
*/
/**************************************************************************/
int gk_csr_GetSimilarRows(gk_csr_t *mat, int nqterms, int *qind, float *qval,
        int simtype, int nsim, float minsim, gk_fkv_t *hits, int *i_marker,
        gk_fkv_t *i_cand)
{
  ssize_t i, ii, j, k;
  int nrows, ncols, ncand;
  ssize_t *colptr;
  int *colind, *marker;
  float *colval, *rnorms, mynorm, *rsums, mysum;
  gk_fkv_t *cand;

  if (nqterms == 0)
    return 0;

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* marker[row] is the row's slot in cand[] (-1 = not yet a candidate);
     caller-provided scratch arrays are used when available to avoid
     repeated allocation across queries. */
  marker = (i_marker ? i_marker : gk_ismalloc(nrows, -1, "gk_csr_SimilarRows: marker"));
  cand   = (i_cand   ? i_cand   : gk_fkvmalloc(nrows, "gk_csr_SimilarRows: cand"));

  switch (simtype) {
    case GK_CSR_COS:
      /* Accumulate, via the inverted (column) index, the dot product of
         the query with every row sharing at least one column. */
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }
      /* NOTE(review): unlike the JAC case below, the dot products are NOT
         divided by the row norms here -- presumably the rows are expected
         to be pre-normalized (gk_csr_Normalize); confirm with callers. */
      break;

    case GK_CSR_JAC:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }

      /* Extended-Jaccard normalization: dot/(|r|^2+|q|^2-dot).
         NOTE(review): requires mat->rnorms to have been populated by
         gk_csr_ComputeSquaredNorms(mat, GK_CSR_ROW); no check here. */
      rnorms = mat->rnorms;
      mynorm = gk_fdot(nqterms, qval, 1, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rnorms[cand[i].val]+mynorm-cand[i].key);
      break;

    case GK_CSR_MIN:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }

      /* min/(sum1+sum2-min) normalization.
         NOTE(review): requires mat->rsums from gk_csr_ComputeSums. */
      rsums = mat->rsums;
      mysum = gk_fsum(nqterms, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rsums[cand[i].val]+mysum-cand[i].key);
      break;

    /* Assymetric MIN similarity */
    case GK_CSR_AMIN:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }

      /* Normalized by the query's sum only (asymmetric). */
      mysum = gk_fsum(nqterms, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/mysum;
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  /* go and prune the hits that are below minsim; this pass also resets
     marker[] so the scratch arrays can be reused by the next query */
  for (j=0, i=0; i<ncand; i++) {
    marker[cand[i].val] = -1;
    if (cand[i].key >= minsim)
      cand[j++] = cand[i];
  }
  ncand = j;

  if (nsim == -1 || nsim >= ncand) {
    nsim = ncand;
  }
  else {
    /* Partial selection of the top-nsim, then sort just those. */
    nsim = gk_min(nsim, ncand);
    gk_dfkvkselect(ncand, nsim, cand);
    gk_fkvsortd(nsim, cand);
  }

  gk_fkvcopy(nsim, cand, hits);

  if (i_marker == NULL)
    gk_free((void **)&marker, LTERM);
  if (i_cand == NULL)
    gk_free((void **)&cand, LTERM);

  return nsim;
}
GB_unaryop__minv_fp64_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_fp64_uint8
// op(A') function:  GB_tran__minv_fp64_uint8

// C type:   double
// A type:   uint8_t
// cast:     double cij = (double) aij
// unaryop:  cij = 1./aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
// NOTE(review): for aij == 0 this evaluates 1./0 in double arithmetic,
// yielding +Inf -- presumably the intended MINV semantics for an
// integer input cast to fp64; confirm against the GraphBLAS spec.
#define GB_OP(z, x) \
    z = 1./x ;

// casting
#define GB_CASTING(z, x) \
    double z = (double) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                          \
    /* aij = Ax [pA] */                    \
    GB_GETA (aij, Ax, pA) ;                \
    /* Cx [pC] = op (cast (aij)) */        \
    GB_CASTING (x, aij) ;                  \
    GB_OP (GB_CX (pC), x) ;                \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_FP64 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_fp64_uint8
(
    double *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // embarrassingly parallel elementwise apply over the anz entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_fp64_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template is specialized by the macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
matvec_cpu_omp_kernel.c
#include "matvec.h"
#include <homp.h>
#ifdef USE_INTEL_MKL
#include <mkl.h>
#endif

/* Computes y[start_n .. start_n+length_n) += A*x for a row slice of the
 * n-column matrix a on the host CPU, using either MKL's sgemv or a plain
 * OpenMP loop nest.
 *
 * off:      offloading descriptor; only dev->num_cores is read here.
 * n:        number of columns of the full matrix.
 * start_n:  first row of this device's slice.
 * length_n: number of rows in this device's slice.
 * a, x, y:  matrix, input vector, and output vector.
 */
void matvec_cpu_omp_wrapper(omp_offloading_t *off, long n, long start_n, long length_n, REAL *a, REAL *x, REAL *y)
{
    int num_omp_threads = off->dev->num_cores;
    /* BUGFIX: i and j were declared int while the loop bounds (n, start_n,
     * length_n) are long; for large matrices the int counters could
     * overflow.  They are now long to match the bounds. */
    long i, j;
#ifdef USE_INTEL_MKL
    mkl_mic_disable();
    REAL alpha = 1;
    REAL beta = 0;
#endif

#ifdef USE_INTEL_MKL
    /* BUGFIX: this call was previously wrapped in "#pragma omp parallel",
     * which made every one of num_omp_threads threads execute the SAME
     * full sgemv over the same output vector y -- redundant work and a
     * write-write race on y.  A single call is correct; MKL parallelizes
     * internally. */
    (void) num_omp_threads;  /* threading is delegated to MKL here */
    cblas_sgemv(CblasColMajor, CblasNoTrans, length_n, n, alpha, a, length_n, x, 1, beta, y, 1);
    /* mkl_mic_enable(); */
#else
    /* NOTE(review): y is accumulated into (+=), so the caller is assumed
     * to have zero-initialized the output slice -- confirm at call sites. */
#pragma omp parallel for simd shared(y, x, a, start_n, length_n) private(i,j) num_threads(num_omp_threads)
    for (i = start_n; i < start_n + length_n; i++) {
        for (j = 0; j < n; j++)
            y[i] += a[i*n + j] * x[j];
    }
#endif
}
image-view.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % V V IIIII EEEEE W W % % V V I E W W % % V V I EEE W W W % % V V I E WW WW % % V IIIII EEEEE W W % % % % % % MagickCore Image View Methods % % % % Software Design % % Cristy % % March 2003 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/MagickCore.h" #include "MagickCore/exception-private.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor-private.h" #include "MagickCore/thread-private.h" /* Typedef declarations. */ struct _ImageView { char *description; RectangleInfo extent; Image *image; CacheView *view; ExceptionInfo *exception; MagickBooleanType debug; size_t signature; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageView() makes a copy of the specified image view. 
%
%  The format of the CloneImageView method is:
%
%      ImageView *CloneImageView(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport ImageView *CloneImageView(const ImageView *image_view)
{
  ImageView
    *clone_view;

  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  clone_view=(ImageView *) AcquireCriticalMemory(sizeof(*clone_view));
  (void) memset(clone_view,0,sizeof(*clone_view));
  clone_view->description=ConstantString(image_view->description);
  clone_view->extent=image_view->extent;
  /*
    BUGFIX: the image pointer was previously never copied into the clone
    (memset left it NULL), so GetImageViewImage() on a clone returned NULL
    and the iterator methods dereferenced a NULL source->image.  The clone
    shares the image with the original (not owned by the view).
  */
  clone_view->image=image_view->image;
  clone_view->view=CloneCacheView(image_view->view);
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,image_view->exception);
  clone_view->debug=image_view->debug;
  clone_view->signature=MagickCoreSignature;
  return(clone_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e V i e w                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageView() deallocates memory associated with a image view.
%
%  The format of the DestroyImageView method is:
%
%      ImageView *DestroyImageView(ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport ImageView *DestroyImageView(ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /* description is optional; the cache view and exception are always set */
  if (image_view->description != (char *) NULL)
    image_view->description=DestroyString(image_view->description);
  image_view->view=DestroyCacheView(image_view->view);
  image_view->exception=DestroyExceptionInfo(image_view->exception);
  /* invalidate the signature so stale pointers fail the assertions above */
  image_view->signature=(~MagickCoreSignature);
  /* the image itself is not owned by the view and is not destroyed here */
  image_view=(ImageView *) RelinquishMagickMemory(image_view);
  return(image_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D u p l e x T r a n s f e r I m a g e V i e w I t e r a t o r             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DuplexTransferImageViewIterator() iterates over three image views in
%  parallel and calls your transfer method for each scanline of the view.  The
%  source and duplex pixel extent is not confined to the image canvas-- that is
%  you can include negative offsets or widths or heights that exceed the image
%  dimension.  However, the destination image view is confined to the image
%  canvas-- that is no negative offsets or widths or heights that exceed the
%  image dimension are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType DuplexTransferImageViewMethod(const ImageView *source,
%        const ImageView *duplex,ImageView *destination,const ssize_t y,
%        const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the DuplexTransferImageViewIterator method is:
%
%      MagickBooleanType DuplexTransferImageViewIterator(ImageView *source,
%        ImageView *duplex,ImageView *destination,
%        DuplexTransferImageViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source image view.
%
%    o duplex: the duplex image view.
%
%    o destination: the destination image view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
MagickExport MagickBooleanType DuplexTransferImageViewIterator(
  ImageView *source,ImageView *duplex,ImageView *destination,
  DuplexTransferImageViewMethod transfer,void *context)
{
  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (transfer == (DuplexTransferImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  destination_image=destination->image;
  /* the destination must be writable pixel-by-pixel */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* NOTE(review): height is computed as extent.height-extent.y to match
     the loop bound below (y runs from extent.y to extent.height), i.e.
     extent.height is treated as an end coordinate here, not a length --
     confirm against other iterators before changing. */
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict duplex_pixels,
      *magick_restrict pixels;

    register Quantum
      *magick_restrict destination_pixels;

    /* another thread already failed: skip remaining rows cheaply */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
      duplex->extent.width,1,duplex->exception);
    if (duplex_pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    if (destination_pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* user callback processes one scanline of all three views */
    if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared across threads; the increment is atomic */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i e w A u t h e n t i c M e t a c o n t e n t           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewAuthenticMetacontent() returns the image view authentic
%  meta-content.
%
%  The format of the GetImageViewAuthenticPixels method is:
%
%      void *GetImageViewAuthenticMetacontent(
%        const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport void *GetImageViewAuthenticMetacontent(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /* thin accessor: delegates to the underlying cache view */
  return(GetCacheViewAuthenticMetacontent(image_view->view));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i e w A u t h e n t i c P i x e l s                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewAuthenticPixels() returns the image view authentic pixels.
%
%  The format of the GetImageViewAuthenticPixels method is:
%
%      Quantum *GetImageViewAuthenticPixels(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport Quantum *GetImageViewAuthenticPixels(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /* thin accessor: delegates to the underlying cache view */
  return(GetCacheViewAuthenticPixelQueue(image_view->view));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i e w E x c e p t i o n                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewException() returns the severity, reason, and description of any
%  error that occurs when utilizing a image view.
%
%  The format of the GetImageViewException method is:
%
%      char *GetImageViewException(const PixelImage *image_view,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o image_view: the pixel image_view.
%
%    o severity: the severity of the error is returned here.
%
*/
MagickExport char *GetImageViewException(const ImageView *image_view,
  ExceptionType *severity)
{
  char
    *description;

  assert(image_view != (const ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  assert(severity != (ExceptionType *) NULL);
  *severity=image_view->exception->severity;
  /* caller owns the returned string and must free it */
  description=(char *) AcquireQuantumMemory(2UL*MagickPathExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *description='\0';
  /* format: "<localized reason> (<localized description>)" */
  if (image_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      image_view->exception->severity,image_view->exception->reason),
      MagickPathExtent);
  if (image_view->exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(description," (",MagickPathExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        image_view->exception->severity,image_view->exception->description),
        MagickPathExtent);
      (void) ConcatenateMagickString(description,")",MagickPathExtent);
    }
  return(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i e w E x t e n t                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewExtent() returns the image view extent.
%
%  The format of the GetImageViewExtent method is:
%
%      RectangleInfo GetImageViewExtent(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport RectangleInfo GetImageViewExtent(const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /* returned by value; caller gets a copy of the view's rectangle */
  return(image_view->extent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i e w I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewImage() returns the image associated with the image view.
%
%  The format of the GetImageViewImage method is:
%
%      MagickCore *GetImageViewImage(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport Image *GetImageViewImage(const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /* the view does not own the image; do not destroy the returned pointer */
  return(image_view->image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i e w I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewIterator() iterates over the image view in parallel and calls
%  your get method for each scanline of the view.  The pixel extent is
%  not confined to the image canvas-- that is you can include negative offsets
%  or widths or heights that exceed the image dimension.  Any updates to
%  the pixels in your callback are ignored.
%
%  The callback signature is:
%
%      MagickBooleanType GetImageViewMethod(const ImageView *source,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback get method that must be
%  executed by a single thread at a time.
%
%  The format of the GetImageViewIterator method is:
%
%      MagickBooleanType GetImageViewIterator(ImageView *source,
%        GetImageViewMethod get,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source image view.
%
%    o get: the get callback method.
%
%    o context: the user defined context.
%
*/
MagickExport MagickBooleanType GetImageViewIterator(ImageView *source,
  GetImageViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (get == (GetImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* NOTE(review): extent.height is used as an end coordinate (loop bound
     below), so the thread-count heuristic uses height-y -- same pattern
     as DuplexTransferImageViewIterator. */
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *pixels;

    /* another thread already failed: skip remaining rows cheaply */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* read-only callback; any writes to pixels are discarded */
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i e w V i r t u a l M e t a c o n t e n t               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewVirtualMetacontent() returns the image view virtual
%  meta-content.
%
%  The format of the GetImageViewVirtualMetacontent method is:
%
%      const void *GetImageViewVirtualMetacontent(
%        const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport const void *GetImageViewVirtualMetacontent(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /* thin accessor: delegates to the underlying cache view */
  return(GetCacheViewVirtualMetacontent(image_view->view));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i e w V i r t u a l P i x e l s                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewVirtualPixels() returns the image view virtual pixels.
%
%  The format of the GetImageViewVirtualPixels method is:
%
%      const Quantum *GetImageViewVirtualPixels(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
% */ MagickExport const Quantum *GetImageViewVirtualPixels( const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(GetCacheViewVirtualPixelQueue(image_view->view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageView() returns MagickTrue if the the parameter is verified as a image % view object. % % The format of the IsImageView method is: % % MagickBooleanType IsImageView(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport MagickBooleanType IsImageView(const ImageView *image_view) { if (image_view == (const ImageView *) NULL) return(MagickFalse); if (image_view->signature != MagickCoreSignature) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w I m a g e V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewImageView() returns a image view required for all other methods in the % Image View API. % % The format of the NewImageView method is: % % ImageView *NewImageView(MagickCore *wand,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ImageView *NewImageView(Image *image,ExceptionInfo *exception) { ImageView *image_view; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); image_view=(ImageView *) AcquireCriticalMemory(sizeof(*image_view)); (void) memset(image_view,0,sizeof(*image_view)); image_view->description=ConstantString("ImageView"); image_view->image=image; image_view->view=AcquireVirtualCacheView(image_view->image,exception); image_view->extent.width=image->columns; image_view->extent.height=image->rows; image_view->extent.x=0; image_view->extent.y=0; image_view->exception=AcquireExceptionInfo(); image_view->debug=IsEventLogging(); image_view->signature=MagickCoreSignature; return(image_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w I m a g e V i e w R e g i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewImageViewRegion() returns a image view required for all other methods % in the Image View API. % % The format of the NewImageViewRegion method is: % % ImageView *NewImageViewRegion(MagickCore *wand,const ssize_t x, % const ssize_t y,const size_t width,const size_t height, % ExceptionInfo *exception) % % A description of each parameter follows: % % o wand: the magick wand. % % o x,y,columns,rows: These values define the perimeter of a extent of % pixel_wands view. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ImageView *NewImageViewRegion(Image *image,const ssize_t x, const ssize_t y,const size_t width,const size_t height, ExceptionInfo *exception) { ImageView *image_view; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); image_view=(ImageView *) AcquireCriticalMemory(sizeof(*image_view)); (void) memset(image_view,0,sizeof(*image_view)); image_view->description=ConstantString("ImageView"); image_view->view=AcquireVirtualCacheView(image_view->image,exception); image_view->image=image; image_view->extent.width=width; image_view->extent.height=height; image_view->extent.x=x; image_view->extent.y=y; image_view->exception=AcquireExceptionInfo(); image_view->debug=IsEventLogging(); image_view->signature=MagickCoreSignature; return(image_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e V i e w D e s c r i p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageViewDescription() associates a description with an image view. % % The format of the SetImageViewDescription method is: % % void SetImageViewDescription(ImageView *image_view, % const char *description) % % A description of each parameter follows: % % o image_view: the image view. % % o description: the image view description. % */ MagickExport void SetImageViewDescription(ImageView *image_view, const char *description) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); image_view->description=ConstantString(description); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageViewIterator() iterates over the image view in parallel and calls % your set method for each scanline of the view. 
The pixel extent is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.  The pixels are initiallly
%  undefined and any settings you make in the callback method are
%  automagically synced back to your image.
%
%  The callback signature is:
%
%      MagickBooleanType SetImageViewMethod(ImageView *destination,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetImageViewIterator method is:
%
%      MagickBooleanType SetImageViewIterator(ImageView *destination,
%        SetImageViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the image view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
MagickExport MagickBooleanType SetImageViewIterator(ImageView *destination,
  SetImageViewMethod set,void *context)
{
  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(destination != (ImageView *) NULL);
  assert(destination->signature == MagickCoreSignature);
  if (set == (SetImageViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->image;
  /* writing pixels requires DirectClass storage */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=destination->extent.height-destination->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(destination_image,destination_image,height,1)
#endif
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register Quantum
      *magick_restrict pixels;

    /* a failure on any scanline makes the remaining iterations no-ops */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,destination->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* sync result is kept separate so a callback failure is not lost */
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(destination_image,destination->description,
          progress,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f e r I m a g e V i e w I t e r a t o r                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferImageViewIterator() iterates over two image views in parallel and
%  calls your transfer method for each scanline of the view.  The source pixel
%  extent is not confined to the image canvas-- that is you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  However, the destination image view is confined to the image canvas-- that
%  is no negative offsets or widths or heights that exceed the image dimension
%  are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType TransferImageViewMethod(const ImageView *source,
%        ImageView *destination,const ssize_t y,const int thread_id,
%        void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the TransferImageViewIterator method is:
%
%      MagickBooleanType TransferImageViewIterator(ImageView *source,
%        ImageView *destination,TransferImageViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source image view.
%
%    o destination: the destination image view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
MagickExport MagickBooleanType TransferImageViewIterator(ImageView *source,
  ImageView *destination,TransferImageViewMethod transfer,void *context)
{
  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  /* NOTE(review): destination is dereferenced without an assert/NULL check,
     unlike source -- confirm callers guarantee a valid destination view */
  if (transfer == (TransferImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  destination_image=destination->image;
  /* writing destination pixels requires DirectClass storage */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict pixels;

    register Quantum
      *magick_restrict destination_pixels;

    /* a failure on any scanline makes the remaining iterations no-ops */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    if (destination_pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (transfer(source,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* sync result is kept separate so a callback failure is not lost */
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U p d a t e I m a g e V i e w I t e r a t o r                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UpdateImageViewIterator() iterates over the image view in parallel and calls
%  your update method for each scanline of the view.  The pixel extent is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension are permitted.  Updates to pixels
%  in your callback are automagically synced back to the image.
%
%  The callback signature is:
%
%      MagickBooleanType UpdateImageViewMethod(ImageView *source,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback update method that must be
%  executed by a single thread at a time.
%
%  The format of the UpdateImageViewIterator method is:
%
%      MagickBooleanType UpdateImageViewIterator(ImageView *source,
%        UpdateImageViewMethod update,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source image view.
%
%    o update: the update callback method.
%
%    o context: the user defined context.
% */ MagickExport MagickBooleanType UpdateImageViewIterator(ImageView *source, UpdateImageViewMethod update,void *context) { Image *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (ImageView *) NULL); assert(source->signature == MagickCoreSignature); if (update == (UpdateImageViewMethod) NULL) return(MagickFalse); source_image=source->image; status=SetImageStorageClass(source_image,DirectClass,source->exception); if (status == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=source->extent.height-source->extent.y; #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,source_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict pixels; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (Quantum *) NULL) { status=MagickFalse; continue; } if (update(source,y,id,context) == MagickFalse) status=MagickFalse; status=SyncCacheViewAuthenticPixels(source->view,source->exception); if (status == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(source_image,source->description,progress, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); }
5_parallel_queue_infinite_enqueue_dequeue.c
/* Program : 5 Author : Anish Topic : Write a C program using OpenMP features to create two parallel threads to simulate a linear queue. The first thread should implement the insert operation on the linear queue. The second thread should implement the remove operation on the linear queue. Both the threads should run infinitely. */ #include<stdio.h> #include<omp.h> #include<stdlib.h> int main() { int n,a,num = 0; printf("\n ENTER THE VALUE OF N \n"); scanf("%d",&n); int id,d,Q[n],rear=-1,front=-1; omp_set_dynamic(0); #pragma omp parallel num_threads(2) { id=omp_get_thread_num(); if(id==0) //insert { while(1) { #pragma omp critical { if(rear<n-1) { Q[++rear]=num; printf("\n INSERTED ITEM IS %d",num); num++; } else printf("\n NO SPACE"); //fgetc(stdin); } } } else { while(1) //pop { #pragma omp critical { if(front == rear && front != -1) { d=Q[front]; front = -1; rear = -1; printf("\n DELETED ITEM IS %d",d); } if(front<rear) { d=Q[front]; front++; printf("\n DELETED ITEM IS %d",d); } else printf("\n NO ITEMS TO DELETE"); //fgetc(stdin); } } } } return 0; }
outlier_detection.h
// Outlier detection / normalization helpers for jmm mixture models.
// NOTE(review): the guard macro __OUTLIER_DETECTION_H uses a reserved
// identifier (leading double underscore) -- consider renaming.
#ifndef __OUTLIER_DETECTION_H
#define __OUTLIER_DETECTION_H

#include <algorithm>
#include <omp.h>

#include "distribution.h"
#include "mixture_model.h"
#include "samples.h"
#include "sphere_volume.h"

#include "kdtree-eigen/kdtree_eigen.h"

#define FAIL_ON_ZERO_CDF 0
#define USE_MAX_KEEP 0

namespace jmm {

// Returns true when sample_i carries a finite, positive weight and has not
// been discounted.  Logs (but does not throw on) non-finite weights.
template<typename Samples>
bool isValidSample(const Samples& samples, int sample_i) {
    if(!std::isfinite(samples.weights(sample_i))) {
        std::cerr << "inf or nan sample in outlier detection, id=" << sample_i <<
            ", value=" << samples.weights(sample_i) << '\n';
        return false;
    }
    return samples.weights(sample_i) > 0 &&
        // samples.isDiffuse(sample_i) &&
        samples.discounts(sample_i) == 0;
}

// Estimates the Monte Carlo surface integral, surface area and mean sample
// weight over 'samples' and stores the first two on 'distribution'.
// All three accumulators are combined with an OpenMP reduction.
template<
    int t_dims,
    int t_components,
    int t_conditionalDims,
    typename Scalar,
    template<int, int, typename> class Component,
    template<int, int, typename> class Marginal
>
void normalizeModel(
    Samples<t_dims, Scalar>& samples,
    MixtureModel<t_dims, t_components, t_conditionalDims, Scalar, Component, Marginal>& distribution
) {
    Scalar surfaceIntegral = 0.f, surfaceArea = 0.f, sampleMean = 0.f;
    #pragma omp parallel for reduction(+: surfaceIntegral, surfaceArea, sampleMean)
    for(int sample_i = 0; sample_i < samples.size(); ++sample_i) {
        Scalar pdf = 0.f, marginalPdf = 0.f;
        // assert(false);
        // TODO: next line is needed.
        pdf = distribution.pdf(samples.samples.col(sample_i));
        marginalPdf = distribution.marginalPdf(samples.samples.col(sample_i));
        // distribution.pdfAndMarginalPdfPrune(
        //     samples.samples.col(sample_i), pdf, marginalPdf
        // );
        surfaceIntegral += marginalPdf; // samples.stateDensities(sample_i);
        surfaceArea += 1.f; // / samples.stateDensities(sample_i);
        // Scalar cos = samples.normals.col(sample_i).transpose() * samples.samples.col(sample_i).bottomRows(3);
        // sampleMean += cos * samples.weights(sample_i);
        sampleMean += samples.weights(sample_i);
    }
    surfaceIntegral /= (Scalar) samples.size();
    surfaceArea /= (Scalar) samples.size();
    sampleMean /= (Scalar) samples.size();
    distribution.setSurfaceIntegral(surfaceIntegral);
    distribution.setSurfaceArea(surfaceArea);
    // distribution.setNormalization(sampleMean);
}

// Computes the per-sample SARSA-style error (reward minus MIS pdf).
// WARNING: contains a live assert(false) in the accumulation loop -- this
// path is a stub and will abort if executed in a debug build; the intended
// pdfAndMarginalPdfPrune call is commented out below it.
template<
    int t_dims,
    int t_components,
    int t_conditionalDims,
    typename Scalar,
    template<int, int, typename> class Component,
    template<int, int, typename> class Marginal
>
Eigen::Matrix<Scalar, Eigen::Dynamic, 1> sarsaError(
    Samples<t_dims, Scalar>& samples,
    const MixtureModel<t_dims, t_components, t_conditionalDims, Scalar, Component, Marginal>& distribution
) {
    std::cerr << "Computing sarsa error.\n";
    int nSamples = samples.size();
    Eigen::Matrix<Scalar, Eigen::Dynamic, 1> marginalPdfs(nSamples, 1), pdfs(nSamples, 1);
    Scalar surfaceIntegral = 0.f, sampleMean = 0.f;
    #pragma omp parallel for reduction(+: surfaceIntegral, sampleMean)
    for(int sample_i = 0; sample_i < nSamples; ++sample_i) {
        pdfs(sample_i) = marginalPdfs(sample_i) = 0;
        assert(false);
        // TODO: next line is needed.
        // distribution.pdfAndMarginalPdfPrune(
        //     samples.samples.col(sample_i), pdfs(sample_i), marginalPdfs(sample_i)
        // );
        surfaceIntegral += marginalPdfs(sample_i) / samples.stateDensities(sample_i);
        sampleMean += samples.weights(sample_i);
    }
    surfaceIntegral /= (Scalar) nSamples;
    sampleMean /= (Scalar) nSamples;

    Eigen::Matrix<Scalar, Eigen::Dynamic, 1> error(nSamples, 1);
    #pragma omp parallel for schedule(static)
    for(int sample_i = 0; sample_i < nSamples; ++sample_i) {
        // invalid samples contribute zero error
        if(!isValidSample(samples, sample_i)) {
            error(sample_i) = 0;
            continue;
        }
        Scalar reward = samples.rewards(sample_i) / sampleMean;
        // zero pdfs: fall back to the raw (normalized) reward
        if(pdfs(sample_i) == 0 || marginalPdfs(sample_i) == 0) {
            error(sample_i) = reward;
            continue;
        }
        Scalar pdf = pdfs(sample_i);
        Scalar conditionalPdf = pdf / marginalPdfs(sample_i);
        Scalar marginalNorm = surfaceIntegral;
        Scalar marginalPdf = marginalPdfs(sample_i) / marginalNorm;
        Scalar misPdf;
        // diffuse samples blend the heuristic pdf with the learned pdf
        if(samples.isDiffuse(sample_i)) {
            misPdf =
                distribution.heuristicWeight() * samples.heuristicPdfs(sample_i) +
                (1.f - distribution.heuristicWeight()) * marginalPdf * conditionalPdf;
        } else {
            misPdf = marginalPdf * conditionalPdf;
        }
        error(sample_i) = reward - misPdf;
    }
    return error;
}

// Estimates a k-NN density over the first t_conditionDims coordinates of the
// samples (on a 2D manifold) and divides each sample weight by its density.
// Mutates samples.stateDensities and samples.weights in place.
template<int t_dims, int t_conditionDims, typename Scalar>
void estimateStateDensity(Samples<t_dims, Scalar>& samples) {
    using KDTree = kdt::KDTree<Scalar, kdt::EuclideanDistance<Scalar>>;
    int nSamples = samples.size();
    Eigen::Matrix<Scalar, t_conditionDims, Eigen::Dynamic> samplesCopy =
        samples.samples.topLeftCorner(t_conditionDims, nSamples);
    KDTree kdtree(samplesCopy, true);
    kdtree.setSorted(false);
    // setTakeRoot(false): query returns SQUARED distances; sqrt applied below
    kdtree.setTakeRoot(false);
    kdtree.build();
    typename KDTree::Matrix distsSqr;
    typename KDTree::MatrixI idx;
    size_t knn = 15;
    kdtree.query(samplesCopy, knn, idx, distsSqr);
    // (knn - 1): the query point itself is among its own nearest neighbors
    Scalar knnNorm = (Scalar) (knn - 1) / (Scalar) (nSamples - 1);
    constexpr int manifoldDims = 2;
    #pragma omp parallel for
    for(int sample_i = 0; sample_i < nSamples; ++sample_i) {
        Scalar maxDistance = std::sqrt(distsSqr.col(sample_i).maxCoeff());
        if(maxDistance == 0.f) {
            // degenerate neighborhood (duplicate points): leave weight as-is
            std::cerr << "maxDistance=0 for sample id=" << sample_i <<
                ", value=" << samples.weights(sample_i) << '\n';
            continue;
        }
        Scalar volume = jmm::volume_norm<manifoldDims>::value *
            std::pow(maxDistance, manifoldDims);
        samples.stateDensities(sample_i) = knnNorm / volume;
        Scalar correctedWeight = samples.weights(sample_i) / samples.stateDensities(sample_i);
        if(!std::isfinite(correctedWeight)) {
            std::cerr << "spatially reweighted sample inf or nan, id=" << sample_i <<
                ", value=" << samples.weights(sample_i) <<
                ", reweighted value=" << correctedWeight << '\n';
            continue;
        }
        samples.weights(sample_i) = correctedWeight;
    }
    // Eigen::Matrix<Scalar, 1, Eigen::Dynamic> maxDistances = dists.colwise().maxCoeff();
    // Eigen::Matrix<Scalar, Eigen::Dynamic, 1> volume =
    //     jmm::volume_norm<manifoldDims>::value * maxDistances.array().pow(manifoldDims);
    // Eigen::Matrix<Scalar, 1, Eigen::Dynamic> density = knnNorm / volume.array();
    // m_samples->stateDensities.topRows(nSamples) = density.transpose();
    // m_samples->weights.topRows(nSamples).array() /= m_samples->stateDensities.topRows(nSamples).array();
    // m_samples->samplingPdfs.topRows(nSamples).array() *= m_samples->stateDensities.topRows(nSamples).array();
}

// class OutlierDetection {
//     constexpr static int t_dims = 5;
//     constexpr static int t_components = 1024;
//     constexpr static int t_conditionalDims = 2;
//     using Scalar = float;
//     using MM = jmm::MixtureModel<t_dims, t_components, t_conditionalDims>;
//     using MMCond = jmm::MixtureModel<t_conditionalDims, t_components, 0>;
//     using MMScalar = typename MM::Scalar;
//     using Vectord = typename MM::Vectord;
//     using Matrixd = typename MM::Matrixd;
//     using ConditionalVectord = typename MMCond::Vectord;
//     using ConditionalMatrixd = typename MMCond::Matrixd;

//     void detectOutliers(
//         const MM& distribution,
//         const Samples<t_dims, Scalar>& samples,
//         size_t knn,
//         int minOutliers,
//         Samples<t_dims, Scalar>& outliers
//     ) {
//         int nSamples = samples.size();
//         Scalar totalWeight = samples.weights.topRows(nSamples).sum();
//         using KDTree = kdt::KDTree<Scalar, kdt::EuclideanDistance<Scalar>>;
//         Eigen::Matrix<Scalar, t_dims, Eigen::Dynamic> samplesCopy =
//             samples.samples.topLeftCorner(t_dims, nSamples);
//         KDTree kdtree(samplesCopy, true);
//         kdtree.setSorted(false);
//         kdtree.build();
//         KDTree::Matrix dists;
//         KDTree::MatrixI idx;
//         kdtree.query(samplesCopy, knn, idx, dists);
//         outliers.clear();
//         outliers.reserve(nSamples);
//         Scalar knnNorm = (Scalar) (knn - 1) / (Scalar) (nSamples);
//         Eigen::Matrix<Scalar, 1, Eigen::Dynamic> maxDistances = dists.colwise().maxCoeff();
//         Eigen::Matrix<Scalar, 1, Eigen::Dynamic> volume = volume_norm<t_dims>::value * maxDistances.array().pow(t_dims);
//         Eigen::Matrix<Scalar, 1, Eigen::Dynamic> density = knnNorm * 1.f / volume.array();
//         std::vector<std::pair<int, Scalar>> metricSort; metricSort.reserve(nSamples);
//         for(int sample_i = 0; sample_i < nSamples; ++sample_i) {
//             Scalar sum = 0, sum_sq = 0;
//             for(int nn_i = 0; nn_i < knn; ++nn_i) {
//                 Scalar weight = samples.weights(idx(nn_i, sample_i)) / totalWeight;
//                 // * (logFunctionPdf(idx(nn_i, sample_i)) - logLearnedPdf(idx(nn_i, sample_i)));
//                 sum += weight;
//                 sum_sq += weight * weight;
//             }
//             Scalar variance = (sum_sq / (Scalar) knn - sum * sum / ((Scalar) knn * knn)) * ((Scalar) knn) / (Scalar) knn * density(sample_i);
//             Scalar normalizedSampleWeight = samples.weights(sample_i) / totalWeight;
//             Scalar metric = variance * normalizedSampleWeight * normalizedSampleWeight;
//             metricSort.emplace_back(sample_i, metric);
//         }
//         int keepForInit = (int) std::max(0.002f * (Scalar) outliers.size(), (Scalar) minOutliers + 1);
//         std::sort(metricSort.begin(), metricSort.end(),
//             [](const auto &a, const auto &b){ return a.second > b.second; });
//         metricSort.erase(metricSort.begin() + keepForInit, metricSort.end());

//         // TODO: we used to do some denoising here. Was it useful?
//         // Scalar totalDenoisedWeight = 0.f;
//         // Scalar outlierDenoisedWeight = 0.f;
//         // for(int outlier_i = 0; outlier_i < (int) metricSort.size(); ++outlier_i) {
//         //     int sample_i = metricSort[outlier_i].first;
//         // }
//     }
// };

}

#endif /* __OUTLIER_DETECTION_H */
image.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % % % MagickCore Image Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/animate.h" #include "magick/artifact.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/compress.h" #include "magick/constitute.h" #include "magick/delegate.h" #include "magick/deprecate.h" #include "magick/display.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/magic.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/semaphore.h" #include "magick/signature-private.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/timer.h" #include "magick/token.h" #include "magick/token-private.h" #include "magick/utility.h" #include "magick/version.h" #include "magick/xwindow-private.h" /* Constant declaration. 
*/
const char
  BackgroundColor[] = "#ffffff",  /* white */
  BorderColor[] = "#dfdfdf",  /* gray */
  DefaultTileFrame[] = "15x15+3+3",
  DefaultTileGeometry[] = "120x120+4+3>",
  DefaultTileLabel[] = "%f\n%G\n%b",
  ForegroundColor[] = "#000",  /* black */
  LoadImageTag[] = "Load/Image",
  LoadImagesTag[] = "Load/Images",
  MatteColor[] = "#bdbdbd",  /* gray */
  PSDensityGeometry[] = "72.0x72.0",
  PSPageGeometry[] = "612x792",
  SaveImageTag[] = "Save/Image",
  SaveImagesTag[] = "Save/Images",
  TransparentColor[] = "#00000000";  /* transparent black */

const double
  DefaultResolution = 72.0;

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireImage() returns a pointer to an image structure initialized to
%  default values.
%
%  The format of the AcquireImage method is:
%
%      Image *AcquireImage(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: Many of the image default values are set from this
%      structure.  For example, filename, compression, depth, background color,
%      and others.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info)
{
  const char
    *option;

  Image
    *image;

  MagickStatusType
    flags;

  /*
    Allocate image structure.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  image=(Image *) AcquireCriticalMemory(sizeof(*image));
  (void) memset(image,0,sizeof(*image));
  /*
    Initialize Image structure.
  */
  (void) CopyMagickString(image->magick,"MIFF",MaxTextExtent);
  image->storage_class=DirectClass;
  image->depth=MAGICKCORE_QUANTUM_DEPTH;
  image->colorspace=sRGBColorspace;
  image->rendering_intent=PerceptualIntent;
  image->gamma=1.000f/2.200f;
  /* Rec.709 / sRGB primaries and D65 white point */
  image->chromaticity.red_primary.x=0.6400f;
  image->chromaticity.red_primary.y=0.3300f;
  image->chromaticity.red_primary.z=0.0300f;
  image->chromaticity.green_primary.x=0.3000f;
  image->chromaticity.green_primary.y=0.6000f;
  image->chromaticity.green_primary.z=0.1000f;
  image->chromaticity.blue_primary.x=0.1500f;
  image->chromaticity.blue_primary.y=0.0600f;
  image->chromaticity.blue_primary.z=0.7900f;
  image->chromaticity.white_point.x=0.3127f;
  image->chromaticity.white_point.y=0.3290f;
  image->chromaticity.white_point.z=0.3583f;
  image->interlace=NoInterlace;
  image->ticks_per_second=UndefinedTicksPerSecond;
  image->compose=OverCompositeOp;
  image->blur=1.0;
  InitializeExceptionInfo(&image->exception);
  (void) QueryColorDatabase(BackgroundColor,&image->background_color,
    &image->exception);
  (void) QueryColorDatabase(BorderColor,&image->border_color,&image->exception);
  (void) QueryColorDatabase(MatteColor,&image->matte_color,&image->exception);
  (void) QueryColorDatabase(TransparentColor,&image->transparent_color,
    &image->exception);
  GetTimerInfo(&image->timer);
  image->ping=MagickFalse;
  image->cache=AcquirePixelCache(0);
  image->blob=CloneBlobInfo((BlobInfo *) NULL);
  image->timestamp=time((time_t *) NULL);
  image->debug=IsEventLogging();
  image->reference_count=1;
  image->semaphore=AllocateSemaphoreInfo();
  image->signature=MagickCoreSignature;
  /* with no image_info the defaults above are the final state */
  if (image_info == (ImageInfo *) NULL)
    return(image);
  /*
    Transfer image info.
  */
  SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
    MagickFalse);
  (void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent);
  (void) CopyMagickString(image->magick_filename,image_info->filename,
    MaxTextExtent);
  (void) CopyMagickString(image->magick,image_info->magick,MaxTextExtent);
  if (image_info->size != (char *) NULL)
    {
      (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
      image->columns=image->extract_info.width;
      image->rows=image->extract_info.height;
      image->offset=image->extract_info.x;
      image->extract_info.x=0;
      image->extract_info.y=0;
    }
  if (image_info->extract != (char *) NULL)
    {
      RectangleInfo
        geometry;

      (void) memset(&geometry,0,sizeof(geometry));
      flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
      if (((flags & XValue) != 0) || ((flags & YValue) != 0))
        {
          image->extract_info=geometry;
          Swap(image->columns,image->extract_info.width);
          Swap(image->rows,image->extract_info.height);
        }
    }
  image->compression=image_info->compression;
  image->quality=image_info->quality;
  image->endian=image_info->endian;
  image->interlace=image_info->interlace;
  image->units=image_info->units;
  if (image_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      flags=ParseGeometry(image_info->density,&geometry_info);
      image->x_resolution=geometry_info.rho;
      image->y_resolution=geometry_info.sigma;
      /* a single density value applies to both axes */
      if ((flags & SigmaValue) == 0)
        image->y_resolution=image->x_resolution;
    }
  if (image_info->page != (char *) NULL)
    {
      char
        *geometry;

      image->page=image->extract_info;
      geometry=GetPageGeometry(image_info->page);
      (void) ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  if (image_info->depth != 0)
    image->depth=image_info->depth;
  image->dither=image_info->dither;
  image->background_color=image_info->background_color;
  image->border_color=image_info->border_color;
  image->matte_color=image_info->matte_color;
  image->transparent_color=image_info->transparent_color;
  image->ping=image_info->ping;
  image->progress_monitor=image_info->progress_monitor;
  image->client_data=image_info->client_data;
  if (image_info->cache != (void *) NULL)
    ClonePixelCacheMethods(image->cache,image_info->cache);
  (void) SyncImageSettings(image_info,image);
  option=GetImageOption(image_info,"delay");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      flags=ParseGeometry(option,&geometry_info);
      /* '>' clamps the delay downward, '<' upward; bare value sets it */
      if ((flags & GreaterValue) != 0)
        {
          if (image->delay > (size_t) floor(geometry_info.rho+0.5))
            image->delay=(size_t) floor(geometry_info.rho+0.5);
        }
      else
        if ((flags & LessValue) != 0)
          {
            /* NOTE(review): this branch updates ticks_per_second, not
               delay -- looks asymmetric with the '>' branch; confirm
               against upstream intent before changing */
            if (image->delay < (size_t) floor(geometry_info.rho+0.5))
              image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
          }
        else
          image->delay=(size_t) floor(geometry_info.rho+0.5);
      if ((flags & SigmaValue) != 0)
        image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
    }
  option=GetImageOption(image_info,"dispose");
  if (option != (const char *) NULL)
    image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
      MagickFalse,option);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e I m a g e I n f o                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireImageInfo() allocates the ImageInfo structure.
%
%  The format of the AcquireImageInfo method is:
%
%      ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
  ImageInfo
    *image_info;

  image_info=(ImageInfo *) AcquireMagickMemory(sizeof(*image_info));
  if (image_info == (ImageInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetImageInfo(image_info);
  return(image_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e N e x t I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireNextImage() initializes the next image in a sequence to
%  default values.
The next member of image points to the newly allocated
%  image.  If there is a memory shortage, next is assigned NULL.
%
%  The format of the AcquireNextImage method is:
%
%      void AcquireNextImage(const ImageInfo *image_info,Image *image)
%
%  A description of each parameter follows:
%
%    o image_info: Many of the image default values are set from this
%      structure.  For example, filename, compression, depth, background color,
%      and others.
%
%    o image: the image.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image)
{
  /*
    Allocate image structure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->next=AcquireImage(image_info);
  if (GetNextImageInList(image) == (Image *) NULL)
    return;  /* allocation failed: image->next remains NULL */
  /*
    Seed the new frame's filename from the parent, then prefer the
    image_info filename when one is supplied.
  */
  (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
    MaxTextExtent);
  if (image_info != (ImageInfo *) NULL)
    (void) CopyMagickString(GetNextImageInList(image)->filename,
      image_info->filename,MaxTextExtent);
  /*
    Discard the freshly allocated blob and share the parent's blob instead,
    so every frame in the sequence reads/writes one I/O stream.
  */
  DestroyBlob(GetNextImageInList(image));
  image->next->blob=ReferenceBlob(image->blob);
  image->next->endian=image->endian;
  image->next->scene=image->scene+1;  /* next frame in the sequence */
  image->next->previous=image;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A p p e n d I m a g e s                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AppendImages() takes all images from the current image pointer to the end
%  of the image list and appends them to each other top-to-bottom if the
%  stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting now effects how the image is justified in the
%  final image.
%
%  The format of the AppendImages method is:
%
%      Image *AppendImages(const Image *images,const MagickBooleanType stack,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o stack: A value other than 0 stacks the images top-to-bottom.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
  const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag  "Append/Image"

  CacheView
    *append_view;

  Image
    *append_image;

  MagickBooleanType
    homogeneous_colorspace,
    matte,
    status;

  MagickOffsetType
    n;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    depth,
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y,
    y_offset;

  /*
    Compute maximum area of appended area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    First pass: scan the list to size the canvas.  Stacked (top-to-bottom)
    layout sums rows and keeps the widest column count; side-by-side layout
    sums columns and keeps the tallest row count.
  */
  matte=images->matte;
  number_images=1;
  width=images->columns;
  height=images->rows;
  depth=images->depth;
  homogeneous_colorspace=MagickTrue;
  next=GetNextImageInList(images);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->depth > depth)
      depth=next->depth;
    if (next->colorspace != images->colorspace)
      homogeneous_colorspace=MagickFalse;
    if (next->matte != MagickFalse)
      matte=MagickTrue;
    number_images++;
    if (stack != MagickFalse)
      {
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        continue;
      }
    width+=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Append images.
  */
  append_image=CloneImage(images,width,height,MagickTrue,exception);
  if (append_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(append_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&append_image->exception);
      append_image=DestroyImage(append_image);
      return((Image *) NULL);
    }
  if (homogeneous_colorspace == MagickFalse)
    (void) SetImageColorspace(append_image,sRGBColorspace);
  append_image->depth=depth;
  append_image->matte=matte;
  append_image->page=images->page;
  (void) SetImageBackgroundColor(append_image);
  /*
    Second pass: copy each image into the canvas, justified per its gravity.
  */
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  next=images;
  append_view=AcquireAuthenticCacheView(append_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    CacheView
      *image_view;

    MagickBooleanType
      proceed;

    SetGeometry(append_image,&geometry);
    GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
    if (stack != MagickFalse)
      x_offset-=geometry.x;
    else
      y_offset-=geometry.y;
    image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(status) \
      magick_number_threads(next,next,next->rows,1)
#endif
    for (y=0; y < (ssize_t) next->rows; y++)
    {
      MagickBooleanType
        sync;

      register const IndexPacket
        *magick_restrict indexes;

      register const PixelPacket
        *magick_restrict p;

      register IndexPacket
        *magick_restrict append_indexes;

      register PixelPacket
        *magick_restrict q;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;  /* another thread already failed; skip remaining rows */
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
        next->columns,1,exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      append_indexes=GetCacheViewAuthenticIndexQueue(append_view);
      for (x=0; x < (ssize_t) next->columns; x++)
      {
        SetPixelRed(q,GetPixelRed(p));
        SetPixelGreen(q,GetPixelGreen(p));
        SetPixelBlue(q,GetPixelBlue(p));
        /* force opaque, then overwrite with the source opacity if it has one */
        SetPixelOpacity(q,OpaqueOpacity);
        if (next->matte != MagickFalse)
          SetPixelOpacity(q,GetPixelOpacity(p));
        if ((next->colorspace == CMYKColorspace) &&
            (append_image->colorspace == CMYKColorspace))
          SetPixelIndex(append_indexes+x,GetPixelIndex(indexes+x));
        p++;
        q++;
      }
      sync=SyncCacheViewAuthenticPixels(append_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    image_view=DestroyCacheView(image_view);
    /*
      Advance the insertion point for the next frame.
    */
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) next->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) next->rows;
      }
    proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    next=GetNextImageInList(next);
  }
  append_view=DestroyCacheView(append_view);
  if (status == MagickFalse)
    append_image=DestroyImage(append_image);
  return(append_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C a t c h I m a g e E x c e p t i o n                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CatchImageException() returns if no exceptions are found in the image
%  sequence, otherwise it determines the most severe exception and reports
%  it as a warning or error depending on the severity.
%
%  The format of the CatchImageException method is:
%
%      ExceptionType CatchImageException(Image *image)
%
%  A description of each parameter follows:
%
%    o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
  ExceptionInfo
    *image_exception;

  ExceptionType
    most_severe;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Gather the worst exception found anywhere in the sequence, report it,
    and hand its severity back to the caller.
  */
  image_exception=AcquireExceptionInfo();
  GetImageException(image,image_exception);
  CatchException(image_exception);
  most_severe=image_exception->severity;
  image_exception=DestroyExceptionInfo(image_exception);
  return(most_severe);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l i p I m a g e P a t h                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClipImagePath() sets the image clip mask based any clipping path information
%  if it exists.
%
%  The format of the ClipImagePath method is:
%
%      MagickBooleanType ClipImagePath(Image *image,const char *pathname,
%        const MagickBooleanType inside)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o pathname: name of clipping path resource. If name is preceded by #, use
%      clipping path numbered by name.
%
%    o inside: if non-zero, later operations take effect inside clipping path.
%      Otherwise later operations take effect outside clipping path.
% */ MagickExport MagickBooleanType ClipImage(Image *image) { return(ClipImagePath(image,"#1",MagickTrue)); } MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname, const MagickBooleanType inside) { #define ClipImagePathTag "ClipPath/Image" char *property; const char *value; Image *clip_mask; ImageInfo *image_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pathname != NULL); property=AcquireString(pathname); (void) FormatLocaleString(property,MaxTextExtent,"8BIM:1999,2998:%s", pathname); value=GetImageProperty(image,property); property=DestroyString(property); if (value == (const char *) NULL) { ThrowFileException(&image->exception,OptionError,"NoClipPathDefined", image->filename); return(MagickFalse); } image_info=AcquireImageInfo(); (void) CopyMagickString(image_info->filename,image->filename,MaxTextExtent); (void) ConcatenateMagickString(image_info->filename,pathname,MaxTextExtent); clip_mask=BlobToImage(image_info,value,strlen(value),&image->exception); image_info=DestroyImageInfo(image_info); if (clip_mask == (Image *) NULL) return(MagickFalse); if (clip_mask->storage_class == PseudoClass) { (void) SyncImage(clip_mask); if (SetImageStorageClass(clip_mask,DirectClass) == MagickFalse) return(MagickFalse); } if (inside == MagickFalse) (void) NegateImage(clip_mask,MagickFalse); (void) FormatLocaleString(clip_mask->magick_filename,MaxTextExtent, "8BIM:1999,2998:%s\nPS",pathname); (void) SetImageClipMask(image,clip_mask); clip_mask=DestroyImage(clip_mask); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImage() copies an image and returns the copy as a new image object. 
%
%  If the specified columns and rows is 0, an exact copy of the image is
%  returned, otherwise the pixel data is undefined and must be initialized
%  with the QueueAuthenticPixels() and SyncAuthenticPixels() methods.  On
%  failure, a NULL image is returned and exception describes the reason for the
%  failure.
%
%  The format of the CloneImage method is:
%
%      Image *CloneImage(const Image *image,const size_t columns,
%        const size_t rows,const MagickBooleanType orphan,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the cloned image.
%
%    o rows: the number of rows in the cloned image.
%
%    o detach: With a value other than 0, the cloned image is detached from
%      its parent I/O stream.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
  const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
  double
    scale;

  Image
    *clone_image;

  size_t
    length;

  /*
    Clone the image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((image->columns == 0) || (image->rows == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
        "NegativeOrZeroImageSize","`%s'",image->filename);
      return((Image *) NULL);
    }
  clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image));
  (void) memset(clone_image,0,sizeof(*clone_image));
  clone_image->signature=MagickCoreSignature;
  clone_image->storage_class=image->storage_class;
  clone_image->channels=image->channels;
  clone_image->colorspace=image->colorspace;
  clone_image->matte=image->matte;
  clone_image->columns=image->columns;
  clone_image->rows=image->rows;
  clone_image->dither=image->dither;
  (void) CloneImageProfiles(clone_image,image);
  (void) CloneImageProperties(clone_image,image);
  (void) CloneImageArtifacts(clone_image,image);
  GetTimerInfo(&clone_image->timer);
  InitializeExceptionInfo(&clone_image->exception);
  InheritException(&clone_image->exception,&image->exception);
  if (image->ascii85 != (void *) NULL)
    Ascii85Initialize(clone_image);
  clone_image->extent=image->extent;
  clone_image->magick_columns=image->magick_columns;
  clone_image->magick_rows=image->magick_rows;
  clone_image->type=image->type;
  (void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
    MaxTextExtent);
  (void) CopyMagickString(clone_image->magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(clone_image->filename,image->filename,MaxTextExtent);
  clone_image->progress_monitor=image->progress_monitor;
  clone_image->client_data=image->client_data;
  clone_image->reference_count=1;
  clone_image->next=image->next;
  clone_image->previous=image->previous;
  clone_image->list=NewImageList();
  clone_image->clip_mask=NewImageList();
  clone_image->mask=NewImageList();
  /*
    When detached, the clone gets its own blob and is unlinked from the
    sequence; otherwise it shares the parent's blob via reference counting.
  */
  if (detach == MagickFalse)
    clone_image->blob=ReferenceBlob(image->blob);
  else
    {
      clone_image->next=NewImageList();
      clone_image->previous=NewImageList();
      clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
    }
  clone_image->ping=image->ping;
  clone_image->debug=IsEventLogging();
  clone_image->semaphore=AllocateSemaphoreInfo();
  if (image->colormap != (PixelPacket *) NULL)
    {
      /*
        Allocate and copy the image colormap.
      */
      clone_image->colors=image->colors;
      length=(size_t) image->colors;
      clone_image->colormap=(PixelPacket *) AcquireQuantumMemory(length+1,
        sizeof(*clone_image->colormap));
      if (clone_image->colormap == (PixelPacket *) NULL)
        {
          clone_image=DestroyImage(clone_image);
          ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
        }
      (void) memcpy(clone_image->colormap,image->colormap,length*
        sizeof(*clone_image->colormap));
    }
  if ((columns == 0) || (rows == 0))
    {
      /*
        Exact copy: share the pixel cache by reference and clone metadata.
      */
      if (image->montage != (char *) NULL)
        (void) CloneString(&clone_image->montage,image->montage);
      if (image->directory != (char *) NULL)
        (void) CloneString(&clone_image->directory,image->directory);
      if (image->clip_mask != (Image *) NULL)
        clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
          exception);
      if (image->mask != (Image *) NULL)
        clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
      clone_image->cache=ReferencePixelCache(image->cache);
      return(clone_image);
    }
  if ((columns == image->columns) && (rows == image->rows))
    {
      if (image->clip_mask != (Image *) NULL)
        clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
          exception);
      if (image->mask != (Image *) NULL)
        clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
    }
  /*
    Resized clone: scale the page geometry and tile offset proportionally.
  */
  scale=1.0;
  if (image->columns != 0)
    scale=(double) columns/(double) image->columns;
  clone_image->page.width=(size_t) floor(scale*image->page.width+0.5);
  clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5);
  clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5);
  scale=1.0;
  if (image->rows != 0)
    scale=(double) rows/(double) image->rows;
  clone_image->page.height=(size_t) floor(scale*image->page.height+0.5);
  clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5);
  clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5);
  clone_image->cache=ClonePixelCache(image->cache);
  if (SetImageExtent(clone_image,columns,rows) == MagickFalse)
    {
      InheritException(exception,&clone_image->exception);
      clone_image=DestroyImage(clone_image);
    }
  return(clone_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e I m a g e I n f o                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageInfo() makes a copy of the given image info structure.  If
%  NULL is specified, a new image info structure is created initialized to
%  default values.
%
%  The format of the CloneImageInfo method is:
%
%      ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
  ImageInfo
    *clone_info;

  /*
    Start from a default-initialized ImageInfo; a NULL source simply returns
    those defaults.
  */
  clone_info=AcquireImageInfo();
  if (image_info == (ImageInfo *) NULL)
    return(clone_info);
  clone_info->compression=image_info->compression;
  clone_info->temporary=image_info->temporary;
  clone_info->adjoin=image_info->adjoin;
  clone_info->antialias=image_info->antialias;
  clone_info->scene=image_info->scene;
  clone_info->number_scenes=image_info->number_scenes;
  clone_info->depth=image_info->depth;
  /*
    String members are deep-copied only when present.
  */
  if (image_info->size != (char *) NULL)
    (void) CloneString(&clone_info->size,image_info->size);
  if (image_info->extract != (char *) NULL)
    (void) CloneString(&clone_info->extract,image_info->extract);
  if (image_info->scenes != (char *) NULL)
    (void) CloneString(&clone_info->scenes,image_info->scenes);
  if (image_info->page != (char *) NULL)
    (void) CloneString(&clone_info->page,image_info->page);
  clone_info->interlace=image_info->interlace;
  clone_info->endian=image_info->endian;
  clone_info->units=image_info->units;
  clone_info->quality=image_info->quality;
  if (image_info->sampling_factor != (char *) NULL)
    (void) CloneString(&clone_info->sampling_factor,
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,image_info->server_name);
  if (image_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,image_info->font);
  if (image_info->texture != (char *) NULL)
    (void) CloneString(&clone_info->texture,image_info->texture);
  if (image_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,image_info->density);
  clone_info->pointsize=image_info->pointsize;
  clone_info->fuzz=image_info->fuzz;
  clone_info->pen=image_info->pen;
  clone_info->background_color=image_info->background_color;
  clone_info->border_color=image_info->border_color;
  clone_info->matte_color=image_info->matte_color;
  clone_info->transparent_color=image_info->transparent_color;
  clone_info->dither=image_info->dither;
  clone_info->monochrome=image_info->monochrome;
  clone_info->colors=image_info->colors;
  clone_info->colorspace=image_info->colorspace;
  clone_info->type=image_info->type;
  clone_info->orientation=image_info->orientation;
  clone_info->preview_type=image_info->preview_type;
  clone_info->group=image_info->group;
  clone_info->ping=image_info->ping;
  clone_info->verbose=image_info->verbose;
  if (image_info->view != (char *) NULL)
    (void) CloneString(&clone_info->view,image_info->view);
  if (image_info->authenticate != (char *) NULL)
    (void) CloneString(&clone_info->authenticate,image_info->authenticate);
  (void) CloneImageOptions(clone_info,image_info);
  clone_info->progress_monitor=image_info->progress_monitor;
  clone_info->client_data=image_info->client_data;
  /*
    The pixel cache is shared by reference, not deep-copied.
  */
  clone_info->cache=image_info->cache;
  if (image_info->cache != (void *) NULL)
    clone_info->cache=ReferencePixelCache(image_info->cache);
  if (image_info->profile != (void *) NULL)
    clone_info->profile=(void *) CloneStringInfo((StringInfo *)
      image_info->profile);
  SetImageInfoFile(clone_info,image_info->file);
  SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
  clone_info->stream=image_info->stream;
  clone_info->virtual_pixel_method=image_info->virtual_pixel_method;
  (void) CopyMagickString(clone_info->magick,image_info->magick,MaxTextExtent);
  (void) CopyMagickString(clone_info->unique,image_info->unique,MaxTextExtent);
  (void) CopyMagickString(clone_info->zero,image_info->zero,MaxTextExtent);
  (void) CopyMagickString(clone_info->filename,image_info->filename,
    MaxTextExtent);
  clone_info->subimage=image_info->scene;  /* deprecated */
  clone_info->subrange=image_info->number_scenes;  /* deprecated */
  clone_info->channel=image_info->channel;
  clone_info->debug=IsEventLogging();
  clone_info->signature=image_info->signature;
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o p y I m a g e P i x e l s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CopyImagePixels() copies pixels from the source image as defined by the
%  geometry the destination image at the specified offset.
%
%  The format of the CopyImagePixels method is:
%
%      MagickBooleanType CopyImagePixels(Image *image,const Image *source_image,
%        const RectangleInfo *geometry,const OffsetInfo *offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the destination image.
%
%    o source_image: the source image.
%
%    o geometry: define the dimensions of the source pixel rectangle.
%
%    o offset: define the offset in the destination image.
%
%    o exception: return the highest severity exception.
%
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
  const Image *source_image,const RectangleInfo *geometry,
  const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag  "Copy/Image"

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(source_image != (Image *) NULL);
  assert(geometry != (RectangleInfo *) NULL);
  assert(offset != (OffsetInfo *) NULL);
  /*
    The destination rectangle must fit entirely within the destination image.
  */
  if ((offset->x < 0) || (offset->y < 0) ||
      ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
      ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
    ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
      image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Copy image pixels.
  */
  status=MagickTrue;
  progress=0;
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,image,geometry->height,1)
#endif
  for (y=0; y < (ssize_t) geometry->height; y++)
  {
    register const IndexPacket
      *magick_restrict source_indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row failed; skip remaining work */
    p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
      geometry->width,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    source_indexes=GetCacheViewVirtualIndexQueue(source_view);
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) geometry->width; x++)
    {
      *q=(*p);
      if (image->colorspace == CMYKColorspace)
        indexes[x]=source_indexes[x];
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CopyImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImage() dereferences an image, deallocating memory associated with
%  the image if the reference count becomes zero.
%
%  The format of the DestroyImage method is:
%
%      Image *DestroyImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
  MagickBooleanType
    destroy;

  /*
    Dereference image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Decrement the reference count under the image semaphore; only the last
    reference actually tears the image down.
  */
  destroy=MagickFalse;
  LockSemaphoreInfo(image->semaphore);
  image->reference_count--;
  if (image->reference_count == 0)
    destroy=MagickTrue;
  UnlockSemaphoreInfo(image->semaphore);
  if (destroy == MagickFalse)
    return((Image *) NULL);
  /*
    Destroy image.
  */
  DestroyImagePixels(image);
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  if (image->mask != (Image *) NULL)
    image->mask=DestroyImage(image->mask);
  if (image->montage != (char *) NULL)
    image->montage=DestroyString(image->montage);
  if (image->directory != (char *) NULL)
    image->directory=DestroyString(image->directory);
  if (image->colormap != (PixelPacket *) NULL)
    image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
  if (image->geometry != (char *) NULL)
    image->geometry=DestroyString(image->geometry);
  DestroyImageProfiles(image);
  DestroyImageProperties(image);
  DestroyImageArtifacts(image);
  if (image->ascii85 != (Ascii85Info*) NULL)
    image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
  DestroyBlob(image);
  (void) ClearExceptionInfo(&image->exception,MagickTrue);
  if (image->semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&image->semaphore);
  /* invalidate the signature so stale pointers are caught by asserts */
  image->signature=(~MagickCoreSignature);
  image=(Image *) RelinquishMagickMemory(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e I n f o                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageInfo() deallocates
%  memory associated with an ImageInfo
%  structure.
%
%  The format of the DestroyImageInfo method is:
%
%      ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /*
    Release every owned string member, then the options, cache reference,
    and profile, before freeing the structure itself.
  */
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  if (image_info->view != (char *) NULL)
    image_info->view=DestroyString(image_info->view);
  if (image_info->authenticate != (char *) NULL)
    image_info->authenticate=DestroyString(image_info->authenticate);
  DestroyImageOptions(image_info);
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  /* invalidate the signature so stale pointers are caught by asserts */
  image_info->signature=(~MagickCoreSignature);
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i s a s s o c i a t e I m a g e S t r e a m                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DisassociateImageStream() disassociates the image stream.  It checks if the
%  blob of the specified image is referenced by other images. If the reference
%  count is higher then 1 a new blob is assigned to the specified image.
%
%  The format of the DisassociateImageStream method is:
%
%      void DisassociateImageStream(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  DisassociateBlob(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C l i p M a s k                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageClipMask() returns the clip path associated with the image.
%
%  The format of the GetImageClipMask method is:
%
%      Image *GetImageClipMask(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Image *GetImageClipMask(const Image *image,
  ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->clip_mask == (Image *) NULL)
    return((Image *) NULL);
  /* return a detached clone; the caller owns (and must destroy) it */
  return(CloneImage(image->clip_mask,0,0,MagickTrue,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e E x c e p t i o n                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageException() traverses an image sequence and returns any
%  error more severe than noted by the exception parameter.
%
%  The format of the GetImageException method is:
%
%      void GetImageException(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: Specifies a pointer to a list of one or more images.
%
%    o exception: return the highest severity exception.
%
*/
MagickExport void GetImageException(Image *image,ExceptionInfo *exception)
{
  register Image
    *next;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Promote the most severe per-frame exception into the caller's exception,
    clearing each frame's exception as it is visited.
  */
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->exception.severity == UndefinedException)
      continue;
    if (next->exception.severity > exception->severity)
      InheritException(exception,&next->exception);
    next->exception.severity=UndefinedException;
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e I n f o                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageInfo() initializes image_info to default values.
%
%  The format of the GetImageInfo method is:
%
%      void GetImageInfo(ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *synchronize;

  ExceptionInfo
    *exception;

  /*
    File and image dimension members.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  (void) memset(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /* Honor the MAGICK_SYNCHRONIZE environment variable if it is set. */
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  exception=AcquireExceptionInfo();
  (void) QueryColorDatabase(BackgroundColor,&image_info->background_color,
    exception);
  (void) QueryColorDatabase(BorderColor,&image_info->border_color,exception);
  (void) QueryColorDatabase(MatteColor,&image_info->matte_color,exception);
  (void) QueryColorDatabase(TransparentColor,&image_info->transparent_color,
    exception);
  exception=DestroyExceptionInfo(exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickCoreSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e I n f o F i l e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageInfoFile() returns the image info file member.
%
%  The format of the GetImageInfoFile method is:
%
%      FILE *GetImageInfoFile(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  /* Simple accessor; the caller retains ownership of the FILE handle. */
  return(image_info->file);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e M a s k                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageMask() returns the mask associated with the image.
%
%  The format of the GetImageMask method is:
%
%      Image *GetImageMask(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Image *GetImageMask(const Image *image,ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->mask == (Image *) NULL)
    return((Image *) NULL);
  /* Return a deep copy so the caller may modify or destroy it freely. */
  return(CloneImage(image->mask,0,0,MagickTrue,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C h a n n e l s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageChannels() returns the number of pixel channels associated with the
%  specified image.
%
%  The format of the GetChannels method is:
%
%      size_t GetImageChannels(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport size_t GetImageChannels(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(image->channels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e R e f e r e n c e C o u n t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageReferenceCount() returns the image reference count.
%
%  The format of the GetReferenceCount method is:
%
%      ssize_t GetImageReferenceCount(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    reference_count;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Read the count under the image semaphore for a consistent snapshot. */
  LockSemaphoreInfo(image->semaphore);
  reference_count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(reference_count);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i r t u a l P i x e l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
%  image.  A virtual pixel is any pixel access that is outside the boundaries
%  of the image cache.
%
%  The format of the GetImageVirtualPixelMethod() method is:
%
%      VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(GetPixelCacheVirtualMethod(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e r p r e t I m a g e F i l e n a m e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InterpretImageFilename() interprets embedded characters in an image
%  filename.  The filename length is returned.
%
%  The format of the InterpretImageFilename method is:
%
%      size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
%        const char *format,int value,char *filename)
%
%  A description of each parameter follows.
%
%    o image_info: the image info..
%
%    o image: the image.
%
%    o format: A filename describing the format to use to write the numeric
%      argument. Only the first numeric format identifier is replaced.
%
%    o value: Numeric value to substitute into format filename.
%
%    o filename: return the formatted filename in this character buffer.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename)
{
  char
    *q;

  int
    c;

  MagickBooleanType
    canonical;

  register const char
    *p;

  ssize_t
    field_width,
    offset;

  canonical=MagickFalse;
  offset=0;
  (void) CopyMagickString(filename,format,MaxTextExtent);
  /* Scan for '%' format specifiers; '%%' is an escaped literal percent. */
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        p=q+1;
        continue;
      }
    field_width=0;
    if (*q == '0')
      field_width=(ssize_t) strtol(q,&q,10);
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        /*
          Numeric scene substitution (e.g. image%03d.png).  The specifier is
          temporarily NUL-terminated so it can be handed to
          FormatLocaleString as a format string on its own.
        */
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format-offset),(size_t)
          (MaxTextExtent-(p-format-offset)),p,value);
        offset+=(4-field_width);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MaxTextExtent];

        const char
          *value;

        register char
          *r;

        register ssize_t
          i;

        ssize_t
          depth;

        /*
          Image option.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        /* Extract the bracketed pattern, honoring nested brackets. */
        depth=1;
        r=q+1;
        for (i=0; (i < (MaxTextExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        /* Only "%[filename:...]" patterns are substituted here. */
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        /* Look up the value: image property, then artifact, then option. */
        value=(const char *) NULL;
        if (image != (Image *) NULL)
          value=GetImageProperty(image,pattern);
        if ((value == (const char *) NULL) && (image != (Image *) NULL))
          value=GetImageArtifact(image,pattern);
        if ((value == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          value=GetImageOption(image_info,pattern);
        if (value == (const char *) NULL)
          break;
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-offset),value,(size_t)
          (MaxTextExtent-(p-format-offset)));
        offset+=strlen(pattern)-4;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  /* Collapse any remaining '%%' escapes into a single '%'. */
  for (q=filename; *q != '\0'; q++)
    if ((*q == '%') && (*(q+1) == '%'))
      {
        (void) CopyMagickString(q,q+1,(size_t) (MaxTextExtent-(q-filename)));
        canonical=MagickTrue;
      }
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MaxTextExtent);
  return(strlen(filename));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s H i g h D y n a m i c R a n g e I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
%  non-integer or exceeds the bounds of the quantum depth (e.g. for Q16
%  0..65535.
%
%  The format of the IsHighDynamicRangeImage method is:
%
%      MagickBooleanType IsHighDynamicRangeImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /* Without HDRI support pixels are always clamped integers. */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    /*
      A component is "HDR" if it lies outside [0,QuantumRange] or is not an
      integral value; break out of the row scan as soon as one is found.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if ((pixel.red < 0.0) || (pixel.red > QuantumRange) ||
          (pixel.red != (QuantumAny) pixel.red))
        break;
      if ((pixel.green < 0.0) || (pixel.green > QuantumRange) ||
          (pixel.green != (QuantumAny) pixel.green))
        break;
      if ((pixel.blue < 0.0) || (pixel.blue > QuantumRange) ||
          (pixel.blue != (QuantumAny) pixel.blue))
        break;
      if (pixel.matte != MagickFalse)
        {
          if ((pixel.opacity < 0.0) || (pixel.opacity > QuantumRange) ||
              (pixel.opacity != (QuantumAny) pixel.opacity))
            break;
        }
      if (pixel.colorspace == CMYKColorspace)
        {
          if ((pixel.index < 0.0) || (pixel.index > QuantumRange) ||
              (pixel.index != (QuantumAny) pixel.index))
            break;
        }
      p++;
    }
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* status stays MagickTrue only when every component was in range. */
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e O b j e c t                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageObject() returns MagickTrue if the image sequence contains a valid
%  set of image objects.
%
%  The format of the IsImageObject method is:
%
%      MagickBooleanType IsImageObject(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  register const Image
    *p;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /* Every image in the list must carry a valid signature. */
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
    if (p->signature != MagickCoreSignature)
      return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s T a i n t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsTaintImage() returns MagickTrue any pixel in the image has been altered
%  since it was first constituted.
%
%  The format of the IsTaintImage method is:
%
%      MagickBooleanType IsTaintImage(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    magick[MaxTextExtent],
    filename[MaxTextExtent];

  register const Image
    *p;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    An image list is tainted when any frame is marked tainted, or when any
    frame's magick/filename differs from the first frame's (i.e. the list no
    longer matches what was originally read).
  */
  (void) CopyMagickString(magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(filename,image->filename,MaxTextExtent);
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    if (p->taint != MagickFalse)
      return(MagickTrue);
    if (LocaleCompare(p->magick,magick) != 0)
      return(MagickTrue);
    if (LocaleCompare(p->filename,filename) != 0)
      return(MagickTrue);
  }
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M o d i f y I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ModifyImage() ensures that there is only a single reference to the image
%  to be modified, updating the provided image pointer to point to a clone of
%  the original image if necessary.
%
%  The format of the ModifyImage method is:
%
%      MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  /* Sole owner already: nothing to do. */
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    {
      /*
        Clone failed: leave *image and its reference count untouched so the
        caller still holds a valid reference; the failure is recorded in
        exception by CloneImage().
      */
      return(MagickFalse);
    }
  /* Drop our reference to the shared image and adopt the private clone. */
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w M a g i c k I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewMagickImage() creates a blank image canvas of the specified size and
%  background color.
%
%  The format of the NewMagickImage method is:
%
%      Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
%        const size_t height,const MagickPixelPacket *background)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the image width.
%
%    o height: the image height.
%
%    o background: the image color.
%
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
  const size_t width,const size_t height,const MagickPixelPacket *background)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  Image
    *image;

  ssize_t
    y;

  MagickBooleanType
    status;

  assert(image_info != (const ImageInfo *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info->signature == MagickCoreSignature);
  assert(background != (const MagickPixelPacket *) NULL);
  image=AcquireImage(image_info);
  /* Inherit canvas geometry and pixel traits from the background color. */
  image->columns=width;
  image->rows=height;
  image->colorspace=background->colorspace;
  image->matte=background->matte;
  image->fuzz=background->fuzz;
  image->depth=background->depth;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelPacket(image,background,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* On any row failure destroy the canvas and return NULL. */
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e f e r e n c e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReferenceImage() increments the reference count associated with an image
%  returning a pointer to the image.
%
%  The format of the ReferenceImage method is:
%
%      Image *ReferenceImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* Bump the count under the image semaphore; pairs with DestroyImage(). */
  LockSemaphoreInfo(image->semaphore);
  image->reference_count++;
  UnlockSemaphoreInfo(image->semaphore);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t I m a g e P a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImagePage() resets the image page canvas and position.
%
%  The format of the ResetImagePage method is:
%
%      MagickBooleanType ResetImagePage(Image *image,const char *page)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o page: the relative page specification.
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flags=ParseAbsoluteGeometry(page,&geometry);
  if ((flags & WidthValue) != 0)
    {
      /* A width without a height implies a square canvas. */
      if ((flags & HeightValue) == 0)
        geometry.height=geometry.width;
      image->page.width=geometry.width;
      image->page.height=geometry.height;
    }
  if ((flags & AspectValue) != 0)
    {
      /* Relative ('!') offsets adjust the current page position. */
      if ((flags & XValue) != 0)
        image->page.x+=geometry.x;
      if ((flags & YValue) != 0)
        image->page.y+=geometry.y;
    }
  else
    {
      /* Absolute offsets replace the page position and may grow the canvas. */
      if ((flags & XValue) != 0)
        {
          image->page.x=geometry.x;
          if ((image->page.width == 0) && (geometry.x > 0))
            image->page.width=image->columns+geometry.x;
        }
      if ((flags & YValue) != 0)
        {
          image->page.y=geometry.y;
          if ((image->page.height == 0) && (geometry.y > 0))
            image->page.height=image->rows+geometry.y;
        }
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t I m a g e P i x e l s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImagePixels() reset the image pixels, that is, all the pixel components
%  are zereod.
%
%  The format of the SetImage method is:
%
%      MagickBooleanType ResetImagePixels(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResetImagePixels(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const void
    *pixels;

  MagickBooleanType
    status;

  MagickSizeType
    length;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  pixels=AcquirePixelCachePixels(image,&length,exception);
  if (pixels != (void *) NULL)
    {
      /*
        Reset in-core image pixels.
      */
      (void) memset((void *) pixels,0,(size_t) length);
      return(MagickTrue);
    }
  /*
    Reset image pixels.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      (void) memset(q,0,sizeof(PixelPacket));
      /* Colormapped and CMYK images also carry an index channel to clear. */
      if ((image->storage_class == PseudoClass) ||
          (image->colorspace == CMYKColorspace))
        indexes[x]=0;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e B a c k g r o u n d C o l o r                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageBackgroundColor() initializes the image pixels to the image
%  background color.  The background color is defined by the background_color
%  member of the image structure.
% % The format of the SetImage method is: % % MagickBooleanType SetImageBackgroundColor(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType SetImageBackgroundColor(Image *image) { CacheView *image_view; ExceptionInfo *exception; IndexPacket index; MagickBooleanType status; MagickPixelPacket background; PixelPacket pixel; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if ((IsPixelGray(&image->background_color) == MagickFalse) && (IsGrayColorspace(image->colorspace) != MagickFalse)) (void) TransformImageColorspace(image,RGBColorspace); if ((image->background_color.opacity != OpaqueOpacity) && (image->matte == MagickFalse)) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *) NULL,&background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); index=0; pixel.opacity=OpaqueOpacity; SetPixelPacket(image,&background,&pixel,&index); /* Set image background color. 
*/ status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) *q++=pixel; if (image->colorspace == CMYKColorspace) { register IndexPacket *magick_restrict indexes; indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,index); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C h a n n e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageChannels() sets the number of pixels channels associated with the % image. % % The format of the SetImageChannels method is: % % MagickBooleanType SetImageChannels(Image *image,const size_t channels) % % A description of each parameter follows: % % o image: the image. % % o channels: The number of pixel channels. % */ MagickExport MagickBooleanType SetImageChannels(Image *image, const size_t channels) { image->channels=channels; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColor() set the entire image canvas to the specified color. 
%
%  The format of the SetImageColor method is:
%
%      MagickBooleanType SetImageColor(Image *image,
%        const MagickPixelPacket *color)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o background: the image color.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
  const MagickPixelPacket *color)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  assert(color != (const MagickPixelPacket *) NULL);
  /* Inherit pixel traits from the fill color. */
  image->colorspace=color->colorspace;
  image->matte=color->matte;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelPacket(image,color,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e S t o r a g e C l a s s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageStorageClass() sets the image class: DirectClass for true color
%  images or PseudoClass for colormapped images.
%
%  The format of the SetImageStorageClass method is:
%
%      MagickBooleanType SetImageStorageClass(Image *image,
%        const ClassType storage_class)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o storage_class:  The image class.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->storage_class=storage_class;
  /* Re-sync the pixel cache so its layout matches the new storage class. */
  return(SyncImagePixelCache(image,&image->exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C l i p M a s k                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageClipMask() associates a clip path with the image.  The clip path
%  must be the same dimensions as the image.  Set any pixel component of
%  the clip path to TransparentOpacity to prevent that corresponding image
%  pixel component from being updated when SyncAuthenticPixels() is applied.
%
%  The format of the SetImageClipMask method is:
%
%      MagickBooleanType SetImageClipMask(Image *image,const Image *clip_mask)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clip_mask: the image clip path.
%
*/
MagickExport MagickBooleanType SetImageClipMask(Image *image,
  const Image *clip_mask)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* The clip path must match the image geometry exactly. */
  if (clip_mask != (const Image *) NULL)
    if ((clip_mask->columns != image->columns) ||
        (clip_mask->rows != image->rows))
      ThrowBinaryImageException(ImageError,"ImageSizeDiffers",image->filename);
  /* Release any previously associated clip path. */
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  image->clip_mask=NewImageList();
  if (clip_mask == (Image *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* Keep a private deep copy; the caller retains ownership of clip_mask. */
  image->clip_mask=CloneImage(clip_mask,0,0,MagickTrue,&image->exception);
  if (image->clip_mask == (Image *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e E x t e n t                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageExtent() sets the image size (i.e. columns & rows).
%
%  The format of the SetImageExtent method is:
%
%      MagickBooleanType SetImageExtent(Image *image,const size_t columns,
%        const size_t rows)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns:  The image width in pixels.
%
%    o rows:  The image height in pixels.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
  const size_t rows)
{
  if ((columns == 0) || (rows == 0))
    ThrowBinaryImageException(ImageError,"NegativeOrZeroImageSize",
      image->filename);
  image->columns=columns;
  image->rows=rows;
  /* Reject depths of 0 or beyond what MagickSizeType can represent. */
  if ((image->depth == 0) || (image->depth > (8*sizeof(MagickSizeType))))
    ThrowBinaryImageException(ImageError,"ImageDepthNotSupported",
      image->filename);
  /* Re-sync the pixel cache to the new geometry. */
  return(SyncImagePixelCache(image,&image->exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t I m a g e I n f o                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfo() initializes the `magick' field of the ImageInfo structure.
%  It is set to a type of image format based on the prefix or suffix of the
%  filename.  For example, `ps:image' returns PS indicating a Postscript image.
%  JPEG is returned for this filename: `image.jpg'.  The filename prefix has
%  precendence over the suffix.  Use an optional index enclosed in brackets
%  after a file name to specify a desired scene of a multi-resolution image
%  format like Photo CD (e.g. img0001.pcd[4]).  A True (non-zero) return value
%  indicates success.
%
%  The format of the SetImageInfo method is:
%
%      MagickBooleanType SetImageInfo(ImageInfo *image_info,
%        const unsigned int frames,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o frames: the number of images you intend to write.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    extension[MaxTextExtent],
    filename[MaxTextExtent],
    magic[MaxTextExtent],
    *q,
    subimage[MaxTextExtent];

  const MagicInfo
    *magic_info;

  const MagickInfo
    *magick_info;

  ExceptionInfo
    *sans_exception;

  Image
    *image;

  MagickBooleanType
    status;

  register const char
    *p;

  ssize_t
    count;

  unsigned char
    magick[2*MaxTextExtent];

  /*
    Look for 'image.format' in filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  *subimage='\0';
  GetPathComponent(image_info->filename,SubimagePath,subimage);
  if (*subimage != '\0')
    {
      /*
        Look for scene specification (e.g. img0001.pcd[4]).
      */
      if (IsSceneGeometry(subimage,MagickFalse) == MagickFalse)
        {
          if (IsGeometry(subimage) != MagickFalse)
            (void) CloneString(&image_info->extract,subimage);
        }
      else
        {
          size_t
            first,
            last;

          /*
            Parse a comma-separated list of scene numbers/ranges (e.g.
            "3,5-7") into scene (minimum) and number_scenes (maximum).
          */
          (void) CloneString(&image_info->scenes,subimage);
          image_info->scene=StringToUnsignedLong(image_info->scenes);
          image_info->number_scenes=image_info->scene;
          p=image_info->scenes;
          /* NOTE(review): the loop condition tests *q (advanced by strtol
             below) while the increment advances p -- presumably intentional
             since p is resynced to q at the bottom; TODO confirm. */
          for (q=(char *) image_info->scenes; *q != '\0'; p++)
          {
            while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
              p++;
            first=(size_t) strtol(p,&q,10);
            last=first;
            while (isspace((int) ((unsigned char) *q)) != 0)
              q++;
            if (*q == '-')
              last=(size_t) strtol(q+1,&q,10);
            if (first > last)
              Swap(first,last);
            if (first < image_info->scene)
              image_info->scene=first;
            if (last > image_info->number_scenes)
              image_info->number_scenes=last;
            p=q;
          }
          image_info->number_scenes-=image_info->scene-1;
          image_info->subimage=image_info->scene;
          image_info->subrange=image_info->number_scenes;
        }
    }
  /*
    Strip compression suffixes (.gz, .bz2, ...) to expose the real extension.
  */
  *extension='\0';
  if (*image_info->magick == '\0')
    GetPathComponent(image_info->filename,ExtensionPath,extension);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
  if (*extension != '\0')
    if ((LocaleCompare(extension,"gz") == 0) ||
        (LocaleCompare(extension,"Z") == 0) ||
        (LocaleCompare(extension,"svgz") == 0) ||
        (LocaleCompare(extension,"wmz") == 0))
      {
        char
          path[MaxTextExtent];

        (void) CopyMagickString(path,image_info->filename,MaxTextExtent);
        path[strlen(path)-strlen(extension)-1]='\0';
        GetPathComponent(path,ExtensionPath,extension);
      }
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
  if (*extension != '\0')
    if (LocaleCompare(extension,"bz2") == 0)
      {
        char
          path[MaxTextExtent];

        (void) CopyMagickString(path,image_info->filename,MaxTextExtent);
        path[strlen(path)-strlen(extension)-1]='\0';
        GetPathComponent(path,ExtensionPath,extension);
      }
#endif
  image_info->affirm=MagickFalse;
  sans_exception=AcquireExceptionInfo();
  if ((*extension != '\0') && (IsGlob(extension) == MagickFalse))
    {
      MagickFormatType
        format_type;

      register ssize_t
        i;

      static const char
        *format_type_formats[] =
        {
          "AUTOTRACE", "BROWSE", "DCRAW", "EDIT", "LAUNCH", "MPEG:DECODE",
          "MPEG:ENCODE", "PRINT", "PS:ALPHA", "PS:CMYK", "PS:COLOR", "PS:GRAY",
          "PS:MONO", "SCAN", "SHOW", "WIN", (char *) NULL
        };

      /*
        User specified image format.
      */
      (void) CopyMagickString(magic,extension,MaxTextExtent);
      LocaleUpper(magic);
      /*
        Look for explicit image formats.
      */
      format_type=UndefinedFormatType;
      i=0;
      while ((format_type == UndefinedFormatType) &&
             (format_type_formats[i] != (char *) NULL))
      {
        if ((*magic == *format_type_formats[i]) &&
            (LocaleCompare(magic,format_type_formats[i]) == 0))
          format_type=ExplicitFormatType;
        i++;
      }
      magick_info=GetMagickInfo(magic,sans_exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (magick_info->format_type != UndefinedFormatType))
        format_type=magick_info->format_type;
      if (format_type == UndefinedFormatType)
        (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
      else
        if (format_type == ExplicitFormatType)
          {
            image_info->affirm=MagickTrue;
            (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
          }
      if (LocaleCompare(magic,"RGB") == 0)
        image_info->affirm=MagickFalse;  /* maybe SGI disguised as RGB */
    }
  /*
    Look for explicit 'format:image' in filename.
  */
  *magic='\0';
  GetPathComponent(image_info->filename,MagickPath,magic);
  if (*magic == '\0')
    {
      (void) CopyMagickString(magic,image_info->magick,MaxTextExtent);
      magick_info=GetMagickInfo(magic,sans_exception);
      if (frames == 0)
        GetPathComponent(image_info->filename,CanonicalPath,filename);
      else
        GetPathComponent(image_info->filename,SubcanonicalPath,filename);
      (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
    }
  else
    {
      const DelegateInfo
        *delegate_info;

      /*
        User specified image format.
      */
      LocaleUpper(magic);
      magick_info=GetMagickInfo(magic,sans_exception);
      delegate_info=GetDelegateInfo(magic,"*",sans_exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        delegate_info=GetDelegateInfo("*",magic,sans_exception);
      if (((magick_info != (const MagickInfo *) NULL) ||
           (delegate_info != (const DelegateInfo *) NULL)) &&
          (IsMagickConflict(magic) == MagickFalse))
        {
          image_info->affirm=MagickTrue;
          (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
          GetPathComponent(image_info->filename,CanonicalPath,filename);
          (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
        }
    }
  sans_exception=DestroyExceptionInfo(sans_exception);
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetMagickEndianSupport(magick_info) == MagickFalse))
    image_info->endian=UndefinedEndian;
  if ((image_info->adjoin != MagickFalse) && (frames > 1))
    {
      /*
        Test for multiple image support (e.g. image%02d.png).
      */
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        image_info->filename,(int) image_info->scene,filename);
      if ((LocaleCompare(filename,image_info->filename) != 0) &&
          (strchr(filename,'%') == (char *) NULL))
        image_info->adjoin=MagickFalse;
    }
  if ((image_info->adjoin != MagickFalse) && (frames > 0))
    {
      /*
        Some image formats do not support multiple frames per file.
      */
      magick_info=GetMagickInfo(magic,exception);
      if (magick_info != (const MagickInfo *) NULL)
        if (GetMagickAdjoin(magick_info) == MagickFalse)
          image_info->adjoin=MagickFalse;
    }
  if (image_info->affirm != MagickFalse)
    return(MagickTrue);
  if (frames == 0)
    {
      /*
        Determine the image format from the first few bytes of the file.
      */
      image=AcquireImage(image_info);
      (void) CopyMagickString(image->filename,image_info->filename,
        MaxTextExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          image=DestroyImage(image);
          return(MagickFalse);
        }
      if ((IsBlobSeekable(image) == MagickFalse) ||
          (IsBlobExempt(image) != MagickFalse))
        {
          /*
            Copy image to a seekable temporary file.
          */
          *filename='\0';
          status=ImageToFile(image,filename,exception);
          (void) CloseBlob(image);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          SetImageInfoFile(image_info,(FILE *) NULL);
          (void) CopyMagickString(image->filename,filename,MaxTextExtent);
          status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
          image_info->temporary=MagickTrue;
        }
      (void) memset(magick,0,sizeof(magick));
      count=ReadBlob(image,2*MaxTextExtent,magick);
      (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
      (void) CloseBlob(image);
      image=DestroyImage(image);
      /*
        Check magic.xml configuration file.
      */
      /* NOTE(review): count is cast to size_t below without checking for a
         read error first -- TODO confirm ReadBlob cannot return a negative
         count here. */
      sans_exception=AcquireExceptionInfo();
      magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
      if ((magic_info != (const MagicInfo *) NULL) &&
          (GetMagicName(magic_info) != (char *) NULL))
        {
          (void) CopyMagickString(image_info->magick,GetMagicName(magic_info),
            MaxTextExtent);
          magick_info=GetMagickInfo(image_info->magick,sans_exception);
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetMagickEndianSupport(magick_info) == MagickFalse))
            image_info->endian=UndefinedEndian;
          sans_exception=DestroyExceptionInfo(sans_exception);
          return(MagickTrue);
        }
      magick_info=GetMagickInfo(image_info->magick,sans_exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetMagickEndianSupport(magick_info) == MagickFalse))
        image_info->endian=UndefinedEndian;
      sans_exception=DestroyExceptionInfo(sans_exception);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   I m a g e   I n f o   B l o b                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfoBlob() sets the image info blob member.
%
%  The format of the SetImageInfoBlob method is:
%
%      void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
%        const size_t length)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o blob: the blob.
%
%    o length: the blob length.
% */ MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob, const size_t length) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->blob=(void *) blob; image_info->length=length; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoFile() sets the image info file member. % % The format of the SetImageInfoFile method is: % % void SetImageInfoFile(ImageInfo *image_info,FILE *file) % % A description of each parameter follows: % % o image_info: the image info. % % o file: the file. % */ MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->file=file; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageMask() associates a mask with the image. The mask must be the same % dimensions as the image. % % The format of the SetImageMask method is: % % MagickBooleanType SetImageMask(Image *image,const Image *mask) % % A description of each parameter follows: % % o image: the image. % % o mask: the image mask. 
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const Image *mask)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* A mask must match the image geometry exactly. */
  if (mask != (const Image *) NULL)
    if ((mask->columns != image->columns) || (mask->rows != image->rows))
      ThrowBinaryImageException(ImageError,"ImageSizeDiffers",image->filename);
  /* NOTE(review): if a caller ever passes mask == image->mask, the mask is
     destroyed here before being cloned below -- TODO confirm no caller does
     this. */
  if (image->mask != (Image *) NULL)
    image->mask=DestroyImage(image->mask);
  image->mask=NewImageList();
  /* A NULL mask simply clears any existing mask. */
  if (mask == (Image *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  image->mask=CloneImage(mask,0,0,MagickTrue,&image->exception);
  if (image->mask == (Image *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   I m a g e   O p a c i t y                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageOpacity() sets the opacity levels of the image.
%
%  The format of the SetImageOpacity method is:
%
%      MagickBooleanType SetImageOpacity(Image *image,const Quantum opacity)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o opacity: the level of transparency: 0 is fully opaque and QuantumRange
%      is fully transparent.
%
*/
MagickExport MagickBooleanType SetImageOpacity(Image *image,
  const Quantum opacity)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* Enable the alpha channel, then assign the same opacity to every pixel. */
  image->matte=MagickTrue;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    /* A failure in any row makes the remaining rows no-ops. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelOpacity(q,opacity);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   I m a g e   V i r t u a l   P i x e l   M e t h o d               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
%  image and returns the previous setting.  A virtual pixel is any pixel access
%  that is outside the boundaries of the image cache.
%
%  The format of the SetImageVirtualPixelMethod() method is:
%
%      VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: choose the type of virtual pixel.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
  const VirtualPixelMethod virtual_pixel_method)
{
  VirtualPixelMethod
    prior_method;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Delegate to the pixel cache, which records the new method and returns the
    setting that was in effect before this call.
  */
  prior_method=SetPixelCacheVirtualMethod(image,virtual_pixel_method);
  return(prior_method);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S m u s h   I m a g e s                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SmushImages() takes all images from the current image pointer to the end
%  of the image list and smushes them to each other top-to-bottom if the
%  stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting now effects how the image is justified in the
%  final image.
%
%  The format of the SmushImages method is:
%
%      Image *SmushImages(const Image *images,const MagickBooleanType stack,
%        const ssize_t offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o stack: A value other than 0 stacks the images top-to-bottom.
%
%    o offset: minimum distance in pixels between images.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Measure the transparent gap between the previous image (left) and this
   image (right) along the x axis; returns the amount the right image can be
   pulled left (minus the requested offset). */
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *left_view,
    *right_view;

  const Image
    *left_image,
    *right_image;

  RectangleInfo
    left_geometry,
    right_geometry;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    y;

  size_t
    gap;

  ssize_t
    x;

  /* The first image in the list has nothing to smush against. */
  if (images->previous == (Image *) NULL)
    return(0);
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  gap=right_image->columns;
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    /* Scan inward from the left image's right edge for opaque pixels. */
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    /* Scan inward from the right image's left edge. */
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}

/* Vertical counterpart of SmushXGap: measure the transparent gap between the
   previous image (top) and this image (bottom) along the y axis. */
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *bottom_view,
    *top_view;

  const Image
    *bottom_image,
    *top_image;

  RectangleInfo
    bottom_geometry,
    top_geometry;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  size_t
    gap;

  ssize_t
    y;

  if (images->previous == (Image *) NULL)
    return(0);
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
  SetGeometry(smush_image,&top_geometry);
  GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
    &top_geometry);
  gap=bottom_image->rows;
  top_view=AcquireVirtualCacheView(top_image,exception);
  bottom_view=AcquireVirtualCacheView(bottom_image,exception);
  for (x=0; x < (ssize_t) smush_image->columns; x++)
  {
    for (y=(ssize_t) top_image->rows-1; y > 0; y--)
    {
      p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((top_image->rows-y-1) >= gap))
        break;
    }
    i=(ssize_t) top_image->rows-y-1;
    for (y=0; y < (ssize_t) bottom_image->rows; y++)
    {
      p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
        exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((y+i) >= (ssize_t) gap))
        break;
    }
    if ((y+i) < (ssize_t) gap)
      gap=(size_t) (y+i);
  }
  bottom_view=DestroyCacheView(bottom_view);
  top_view=DestroyCacheView(top_view);
  if (x < (ssize_t) smush_image->columns)
    return(offset);
  return((ssize_t) gap-offset);
}

MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag  "Smush/Image"

  CacheView
    *smush_view;

  const Image
    *image;

  Image
    *smush_image;

  MagickBooleanType
    matte,
    proceed,
    status;

  MagickOffsetType
    n;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y_offset;

  /*
    Compute maximum area of smushed area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  matte=image->matte;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->matte != MagickFalse)
      matte=MagickTrue;
    number_images++;
    if (stack != MagickFalse)
      {
        /* Stacking: width is the maximum, heights accumulate. */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    /* Side-by-side: widths accumulate, height is the maximum. */
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&smush_image->exception);
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->matte=matte;
  (void) SetImageBackgroundColor(smush_image);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  smush_view=AcquireVirtualCacheView(smush_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      {
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    status=CompositeImage(smush_image,OverCompositeOp,image,x_offset,y_offset);
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /* Trim the canvas to the extent actually covered. */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  smush_view=DestroyCacheView(smush_view);
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S t r i p   I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  StripImage() strips an image of all profiles and comments.
%
%  The format of the StripImage method is:
%
%      MagickBooleanType StripImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType StripImage(Image *image)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  DestroyImageProfiles(image);
  (void) DeleteImageProperty(image,"comment");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"date:modify");
  /* Ask the PNG encoder to omit ancillary chunks that carry metadata. */
  status=SetImageArtifact(image,"png:exclude-chunk",
    "bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c   I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImage() initializes the red, green, and blue intensities of each pixel
%  as defined by the colormap index.
%
%  The format of the SyncImage method is:
%
%      MagickBooleanType SyncImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/

/* Clamp a colormap index to the valid range; out-of-range indices map to 0
   and set *range_exception so the caller can warn once. */
static inline IndexPacket PushColormapIndex(Image *image,const size_t index,
  MagickBooleanType *range_exception)
{
  if (index < image->colors)
    return((IndexPacket) index);
  *range_exception=MagickTrue;
  return((IndexPacket) 0);
}

MagickExport MagickBooleanType SyncImage(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    range_exception,
    status,
    taint;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->ping != MagickFalse)
    return(MagickTrue);
  /* Only colormapped (PseudoClass) images have indexes to sync. */
  if (image->storage_class != PseudoClass)
    return(MagickFalse);
  assert(image->colormap != (PixelPacket *) NULL);
  range_exception=MagickFalse;
  status=MagickTrue;
  taint=image->taint;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(range_exception,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      index;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=PushColormapIndex(image,(size_t) GetPixelIndex(indexes+x),
        &range_exception);
      /* SetPixelRgb is a statement-like macro: no semicolon before `else'
         is intentional here -- presumably it expands to a braced block;
         TODO confirm against the macro definition. */
      if (image->matte == MagickFalse)
        SetPixelRgb(q,image->colormap+(ssize_t) index)
      else
        SetPixelRGBO(q,image->colormap+(ssize_t) index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* Restore the taint flag: syncing derived pixel values is not an edit. */
  image->taint=taint;
  if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c   I m a g e   S e t t i n g s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImageSettings() syncs image_info options into per-image attributes.
%
%  The format of the SyncImageSettings method is:
%
%      MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
%        Image *image)
%      MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
%        Image *image)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
%
*/

MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images)
{
  Image
    *image;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  /* Apply the settings to every image in the list, then drop the per-frame
     "page" option so it is not re-applied. */
  image=images;
  for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
    (void) SyncImageSettings(image_info,image);
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}

MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
  Image *image)
{
  char
    property[MaxTextExtent];

  const char
    *option,
    *value;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  ResolutionType
    units;

  /*
    Sync image options.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  option=GetImageOption(image_info,"background");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->background_color,
      &image->exception);
  option=GetImageOption(image_info,"bias");
  if (option != (const char *) NULL)
    image->bias=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"black-point-compensation");
  if (option != (const char *) NULL)
    image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
      MagickBooleanOptions,MagickFalse,option);
  option=GetImageOption(image_info,"blue-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.blue_primary.x=geometry_info.rho;
      image->chromaticity.blue_primary.y=geometry_info.sigma;
      /* A single value sets both coordinates. */
      if ((flags & SigmaValue) == 0)
        image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
    }
  option=GetImageOption(image_info,"bordercolor");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->border_color,&image->exception);
  option=GetImageOption(image_info,"colors");
  if (option != (const char *) NULL)
    image->colors=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"compose");
  if (option != (const char *) NULL)
    image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"compress");
  if (option != (const char *) NULL)
    image->compression=(CompressionType) ParseCommandOption(
      MagickCompressOptions,MagickFalse,option);
  option=GetImageOption(image_info,"debug");
  if (option != (const char *) NULL)
    image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"density");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      /*
        Set image density.
      */
      flags=ParseGeometry(option,&geometry_info);
      image->x_resolution=geometry_info.rho;
      image->y_resolution=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->y_resolution=image->x_resolution;
    }
  option=GetImageOption(image_info,"depth");
  if (option != (const char *) NULL)
    image->depth=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"endian");
  if (option != (const char *) NULL)
    image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"filter");
  if (option != (const char *) NULL)
    image->filter=(FilterTypes) ParseCommandOption(MagickFilterOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"fuzz");
  if (option != (const char *) NULL)
    image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"gravity");
  if (option != (const char *) NULL)
    image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"green-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.green_primary.x=geometry_info.rho;
      image->chromaticity.green_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
    }
  option=GetImageOption(image_info,"intensity");
  if (option != (const char *) NULL)
    image->intensity=(PixelIntensityMethod) ParseCommandOption(
      MagickPixelIntensityOptions,MagickFalse,option);
  option=GetImageOption(image_info,"intent");
  if (option != (const char *) NULL)
    image->rendering_intent=(RenderingIntent) ParseCommandOption(
      MagickIntentOptions,MagickFalse,option);
  option=GetImageOption(image_info,"interlace");
  if (option != (const char *) NULL)
    image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"interpolate");
  if (option != (const char *) NULL)
    image->interpolate=(InterpolatePixelMethod) ParseCommandOption(
      MagickInterpolateOptions,MagickFalse,option);
  option=GetImageOption(image_info,"loop");
  if (option != (const char *) NULL)
    image->iterations=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"mattecolor");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->matte_color,&image->exception);
  option=GetImageOption(image_info,"orient");
  if (option != (const char *) NULL)
    image->orientation=(OrientationType) ParseCommandOption(
      MagickOrientationOptions,MagickFalse,option);
  option=GetImageOption(image_info,"page");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"quality");
  if (option != (const char *) NULL)
    image->quality=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"red-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.red_primary.x=geometry_info.rho;
      image->chromaticity.red_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
    }
  /* An explicit image_info quality overrides the "quality" option above. */
  if (image_info->quality != UndefinedCompressionQuality)
    image->quality=image_info->quality;
  option=GetImageOption(image_info,"scene");
  if (option != (const char *) NULL)
    image->scene=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"taint");
  if (option != (const char *) NULL)
    image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"tile-offset");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"transparent-color");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->transparent_color,
      &image->exception);
  option=GetImageOption(image_info,"type");
  if (option != (const char *) NULL)
    image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
      option);
  option=GetImageOption(image_info,"units");
  units=image_info->units;
  if (option != (const char *) NULL)
    units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
      MagickFalse,option);
  if (units != UndefinedResolution)
    image->units=units;
  option=GetImageOption(image_info,"white-point");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.white_point.x=geometry_info.rho;
      image->chromaticity.white_point.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.white_point.y=image->chromaticity.white_point.x;
    }
  /* Mirror every remaining global option onto the image as an artifact. */
  ResetImageOptionIterator(image_info);
  for (option=GetNextImageOption(image_info); option != (const char *) NULL; )
  {
    value=GetImageOption(image_info,option);
    if (value != (const char *) NULL)
      {
        (void) FormatLocaleString(property,MaxTextExtent,"%s",option);
        (void) SetImageArtifact(image,property,value);
      }
    option=GetNextImageOption(image_info);
  }
  return(MagickTrue);
}
taskloop.c
// RUN: %libomp-compile-and-run | FileCheck %s
// RUN: %libomp-compile-and-run | FileCheck --check-prefix=TASKS %s
// REQUIRES: ompt
// These compilers don't support the taskloop construct
// UNSUPPORTED: gcc-4, gcc-5, icc-16
// GCC 6 has support for taskloops, but at least 6.3.0 is crashing on this test
// UNSUPPORTED: gcc-6
#include "callback.h"
#include <omp.h>

// OMPT event test for the taskloop construct.
// NOTE: every "// CHECK" / "// TASKS" comment below is a FileCheck pattern
// matched against the OMPT callback trace printed by callback.h -- they are
// the test oracle and must not be edited or reordered.
int main() {
  unsigned int i, x;
#pragma omp parallel num_threads(2)
  {
#pragma omp barrier
#pragma omp master
#pragma omp taskloop
    // two iterations (i = 0 and i = 3), hence the count=2 and the exactly
    // two ompt_event_task_create events expected below
    for (i = 0; i < 5; i += 3) {
      x++;
    }
  }

  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]

  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin:
  // CHECK-SAME: parent_task_id={{[0-9]+}}
  // CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]]
  // CHECK-SAME: requested_team_size=2
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]]
  // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID1:[0-9]+]]
  // CHECK-SAME: team_size=2, thread_num=0
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_taskgroup_begin:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID1]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_taskloop_begin:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]]
  // CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID1]]
  // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]], count=2
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
  // CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID1]]
  // CHECK-SAME: new_task_id=[[TASK_ID1:[0-9]+]]
  // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS]]
  // CHECK-SAME: task_type=ompt_task_explicit=4
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
  // CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID1]]
  // CHECK-SAME: new_task_id=[[TASK_ID2:[0-9]+]]
  // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS]]
  // CHECK-SAME: task_type=ompt_task_explicit=4
  // CHECK-NOT: {{^}}[[MASTER_ID]]: ompt_event_task_create:
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_taskloop_end:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]]
  // CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID1]]
  // CHECK-SAME: count=2
  // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_wait_taskgroup_begin:
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_taskgroup_end:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID1]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_taskgroup_end:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID1]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=0
  // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID1]], team_size=2, thread_num=0
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]]

  // TASKS: ompt_event_task_create:{{.*}} new_task_id={{[0-9]+}}
  // TASKS-SAME: task_type=ompt_task_initial
  // TASKS: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_taskloop_begin:
  // TASKS: ompt_event_task_create:{{.*}} new_task_id=[[TASK_ID1:[0-9]+]]
  // TASKS-SAME: task_type=ompt_task_explicit
  // TASKS-DAG: ompt_event_task_create:{{.*}} new_task_id=[[TASK_ID2:[0-9]+]]

  // Schedule events:
  // TASKS-DAG: {{^.*}}first_task_id={{[0-9]+}}, second_task_id=[[TASK_ID1]]
  // TASKS-DAG: {{^.*}}first_task_id=[[TASK_ID1]], second_task_id={{[0-9]+}}
  // TASKS-DAG: {{^.*}}first_task_id={{[0-9]+}}, second_task_id=[[TASK_ID2]]
  // TASKS-DAG: {{^.*}}first_task_id=[[TASK_ID2]], second_task_id={{[0-9]+}}
  // TASKS-NOT: ompt_event_task_schedule

  return 0;
}
barr-check.c
int main() { int X = 0; int Y = 0; #pragma omp parallel { int abc; while (1) { abc = 0 + 3; #pragma omp atomic Y = Y + 1; #pragma omp barrier #pragma omp atomic Y = Y + 2; #pragma omp barrier if (!abc) { #pragma omp single nowait { X = X + 1; } break; } #pragma omp atomic Y = Y + 1; X = 1; } } }
GB_unaryop__lnot_int32_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__lnot_int32_fp64
// op(A') function: GB_tran__lnot_int32_fp64

// C type: int32_t
// A type: double
// cast: int32_t cij ; GB_CAST_SIGNED(cij,aij,32)
// unaryop: cij = !(aij != 0)

// type of the A matrix entries
#define GB_ATYPE \
    double

// type of the C matrix entries
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: logical NOT of a (typecast) value
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting: double -> int32_t (GB_CAST_SIGNED is defined in GB.h)
#define GB_CASTING(z, x) \
    int32_t z ; GB_CAST_SIGNED(z,x,32) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT32 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator entrywise to the anz entries of Ax, writing into Cx,
// in parallel over nthreads. Returns GrB_NO_VALUE when compiled out by
// GB_DISABLE so the caller falls back to the generic kernel.
GrB_Info GB_unop__lnot_int32_fp64
(
    int32_t *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual body lives in the GB_unaryop_transpose.c template, instantiated
// here with the macros defined above (phase 2 of the two-phase transpose).
GrB_Info GB_tran__lnot_int32_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
sm2_sign.c
/*
 * Copyright 2017-2018 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright 2017 Ribose Inc. All Rights Reserved.
 * Ported from Ribose contributions from Botan.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include "internal/sm2.h"
#include "internal/sm2err.h"
#include "internal/ec_int.h" /* ec_group_do_inverse_ord() */
#include "internal/numbers.h"
#include <openssl/err.h>    /* was included twice; one copy removed */
#include <openssl/evp.h>
#include <openssl/bn.h>
#include <string.h>

#ifdef SM2_BENCHMARK
#include <time.h>
#include <sys/time.h>
#ifdef SM2_BENCHMARK_PARALLEL
#include <omp.h>
#endif
#endif

/*
 * Compute the SM2 "Z" digest of a signer identity:
 *   Z = H(ENTL || ID || a || b || xG || yG || xA || yA)
 * where ENTL is the bit length of |id| as a 16-bit big-endian value, (a, b)
 * are the curve parameters, G the generator and A the signer's public key.
 * Writes EVP_MD_size(digest) bytes to |out|.  Returns 1 on success, 0 on
 * error (with an SM2err raised).
 */
int sm2_compute_z_digest(uint8_t *out,
                         const EVP_MD *digest,
                         const uint8_t *id,
                         const size_t id_len,
                         const EC_KEY *key)
{
    int rc = 0;
    const EC_GROUP *group = EC_KEY_get0_group(key);
    BN_CTX *ctx = NULL;
    EVP_MD_CTX *hash = NULL;
    BIGNUM *p = NULL;
    BIGNUM *a = NULL;
    BIGNUM *b = NULL;
    BIGNUM *xG = NULL;
    BIGNUM *yG = NULL;
    BIGNUM *xA = NULL;
    BIGNUM *yA = NULL;
    int p_bytes = 0;
    uint8_t *buf = NULL;
    uint16_t entl = 0;
    uint8_t e_byte = 0;

    hash = EVP_MD_CTX_new();
    ctx = BN_CTX_new();
    if (hash == NULL || ctx == NULL) {
        SM2err(SM2_F_SM2_COMPUTE_Z_DIGEST, ERR_R_MALLOC_FAILURE);
        goto done;
    }

    p = BN_CTX_get(ctx);
    a = BN_CTX_get(ctx);
    b = BN_CTX_get(ctx);
    xG = BN_CTX_get(ctx);
    yG = BN_CTX_get(ctx);
    xA = BN_CTX_get(ctx);
    yA = BN_CTX_get(ctx);

    /* only the last BN_CTX_get() needs to be checked (BN_CTX contract) */
    if (yA == NULL) {
        SM2err(SM2_F_SM2_COMPUTE_Z_DIGEST, ERR_R_MALLOC_FAILURE);
        goto done;
    }

    if (!EVP_DigestInit(hash, digest)) {
        SM2err(SM2_F_SM2_COMPUTE_Z_DIGEST, ERR_R_EVP_LIB);
        goto done;
    }

    /* Z = h(ENTL || ID || a || b || xG || yG || xA || yA) */

    if (id_len >= (UINT16_MAX / 8)) {
        /* too large: ENTL would not fit in 16 bits */
        SM2err(SM2_F_SM2_COMPUTE_Z_DIGEST, SM2_R_ID_TOO_LARGE);
        goto done;
    }

    entl = (uint16_t)(8 * id_len);

    e_byte = entl >> 8;
    if (!EVP_DigestUpdate(hash, &e_byte, 1)) {
        SM2err(SM2_F_SM2_COMPUTE_Z_DIGEST, ERR_R_EVP_LIB);
        goto done;
    }
    e_byte = entl & 0xFF;
    if (!EVP_DigestUpdate(hash, &e_byte, 1)) {
        SM2err(SM2_F_SM2_COMPUTE_Z_DIGEST, ERR_R_EVP_LIB);
        goto done;
    }

    if (id_len > 0 && !EVP_DigestUpdate(hash, id, id_len)) {
        SM2err(SM2_F_SM2_COMPUTE_Z_DIGEST, ERR_R_EVP_LIB);
        goto done;
    }

    if (!EC_GROUP_get_curve(group, p, a, b, ctx)) {
        SM2err(SM2_F_SM2_COMPUTE_Z_DIGEST, ERR_R_EC_LIB);
        goto done;
    }

    p_bytes = BN_num_bytes(p);
    buf = OPENSSL_zalloc(p_bytes);
    if (buf == NULL) {
        SM2err(SM2_F_SM2_COMPUTE_Z_DIGEST, ERR_R_MALLOC_FAILURE);
        goto done;
    }

    /* every field element is hashed as a fixed-width big-endian string */
    if (BN_bn2binpad(a, buf, p_bytes) < 0
            || !EVP_DigestUpdate(hash, buf, p_bytes)
            || BN_bn2binpad(b, buf, p_bytes) < 0
            || !EVP_DigestUpdate(hash, buf, p_bytes)
            || !EC_POINT_get_affine_coordinates(group,
                                                EC_GROUP_get0_generator(group),
                                                xG, yG, ctx)
            || BN_bn2binpad(xG, buf, p_bytes) < 0
            || !EVP_DigestUpdate(hash, buf, p_bytes)
            || BN_bn2binpad(yG, buf, p_bytes) < 0
            || !EVP_DigestUpdate(hash, buf, p_bytes)
            || !EC_POINT_get_affine_coordinates(group,
                                                EC_KEY_get0_public_key(key),
                                                xA, yA, ctx)
            || BN_bn2binpad(xA, buf, p_bytes) < 0
            || !EVP_DigestUpdate(hash, buf, p_bytes)
            || BN_bn2binpad(yA, buf, p_bytes) < 0
            || !EVP_DigestUpdate(hash, buf, p_bytes)
            || !EVP_DigestFinal(hash, out, NULL)) {
        SM2err(SM2_F_SM2_COMPUTE_Z_DIGEST, ERR_R_INTERNAL_ERROR);
        goto done;
    }

    rc = 1;

 done:
    OPENSSL_free(buf);
    BN_CTX_free(ctx);
    EVP_MD_CTX_free(hash);
    return rc;
}

/*
 * Compute e = H(Z || M) as a BIGNUM, where Z is the identity digest above
 * and M is the message.  Returns a freshly allocated BIGNUM (caller frees)
 * or NULL on error.
 */
static BIGNUM *sm2_compute_msg_hash(const EVP_MD *digest,
                                    const EC_KEY *key,
                                    const uint8_t *id,
                                    const size_t id_len,
                                    const uint8_t *msg, size_t msg_len)
{
    EVP_MD_CTX *hash = EVP_MD_CTX_new();
    const int md_size = EVP_MD_size(digest);
    uint8_t *z = NULL;
    BIGNUM *e = NULL;

    if (md_size < 0) {
        SM2err(SM2_F_SM2_COMPUTE_MSG_HASH, SM2_R_INVALID_DIGEST);
        goto done;
    }

    z = OPENSSL_zalloc(md_size);
    if (hash == NULL || z == NULL) {
        SM2err(SM2_F_SM2_COMPUTE_MSG_HASH, ERR_R_MALLOC_FAILURE);
        goto done;
    }

    if (!sm2_compute_z_digest(z, digest, id, id_len, key)) {
        /* SM2err already called */
        goto done;
    }

    if (!EVP_DigestInit(hash, digest)
            || !EVP_DigestUpdate(hash, z, md_size)
            || !EVP_DigestUpdate(hash, msg, msg_len)
               /* reuse z buffer to hold H(Z || M) */
            || !EVP_DigestFinal(hash, z, NULL)) {
        SM2err(SM2_F_SM2_COMPUTE_MSG_HASH, ERR_R_EVP_LIB);
        goto done;
    }

    e = BN_bin2bn(z, md_size, NULL);
    if (e == NULL)
        SM2err(SM2_F_SM2_COMPUTE_MSG_HASH, ERR_R_INTERNAL_ERROR);

 done:
    OPENSSL_free(z);
    EVP_MD_CTX_free(hash);
    return e;
}

/*
 * Core SM2 signature generation over a precomputed hash value e.
 * Returns a new ECDSA_SIG (caller frees) or NULL on error.
 */
static ECDSA_SIG *sm2_sig_gen(const EC_KEY *key, const BIGNUM *e)
{
    const BIGNUM *dA = EC_KEY_get0_private_key(key);
    const EC_GROUP *group = EC_KEY_get0_group(key);
    const BIGNUM *order = EC_GROUP_get0_order(group);
    ECDSA_SIG *sig = NULL;
    EC_POINT *kG = NULL;
    BN_CTX *ctx = NULL;
    BIGNUM *k = NULL;
    BIGNUM *rk = NULL;
    BIGNUM *r = NULL;
    BIGNUM *s = NULL;
    BIGNUM *x1 = NULL;
    BIGNUM *tmp = NULL;

    kG = EC_POINT_new(group);
    ctx = BN_CTX_new();
    if (kG == NULL || ctx == NULL) {
        SM2err(SM2_F_SM2_SIG_GEN, ERR_R_MALLOC_FAILURE);
        goto done;
    }

    BN_CTX_start(ctx);
    k = BN_CTX_get(ctx);
    rk = BN_CTX_get(ctx);
    x1 = BN_CTX_get(ctx);
    tmp = BN_CTX_get(ctx);
    if (tmp == NULL) {
        SM2err(SM2_F_SM2_SIG_GEN, ERR_R_MALLOC_FAILURE);
        goto done;
    }

    /*
     * These values are returned and so should not be allocated out of the
     * context
     */
    r = BN_new();
    s = BN_new();

    if (r == NULL || s == NULL) {
        SM2err(SM2_F_SM2_SIG_GEN, ERR_R_MALLOC_FAILURE);
        goto done;
    }

    for (;;) {
        if (!BN_priv_rand_range(k, order)) {
            SM2err(SM2_F_SM2_SIG_GEN, ERR_R_INTERNAL_ERROR);
            goto done;
        }

        if (!EC_POINT_mul(group, kG, k, NULL, NULL, ctx)
                || !EC_POINT_get_affine_coordinates(group, kG, x1, NULL,
                                                    ctx)
                || !BN_mod_add(r, e, x1, order, ctx)) {
            SM2err(SM2_F_SM2_SIG_GEN, ERR_R_INTERNAL_ERROR);
            goto done;
        }

        /* try again if r == 0 or r+k == n */
        if (BN_is_zero(r))
            continue;

        if (!BN_add(rk, r, k)) {
            SM2err(SM2_F_SM2_SIG_GEN, ERR_R_INTERNAL_ERROR);
            goto done;
        }

        if (BN_cmp(rk, order) == 0)
            continue;

        /* s = ((1 + dA)^-1 * (k - r * dA)) mod n */
        if (!BN_add(s, dA, BN_value_one())
                || !ec_group_do_inverse_ord(group, s, s, ctx)
                || !BN_mod_mul(tmp, dA, r, order, ctx)
                || !BN_sub(tmp, k, tmp)
                || !BN_mod_mul(s, s, tmp, order, ctx)) {
            SM2err(SM2_F_SM2_SIG_GEN, ERR_R_BN_LIB);
            goto done;
        }

        sig = ECDSA_SIG_new();
        if (sig == NULL) {
            SM2err(SM2_F_SM2_SIG_GEN, ERR_R_MALLOC_FAILURE);
            goto done;
        }

        /* takes ownership of r and s */
        ECDSA_SIG_set0(sig, r, s);
        break;
    }

 done:
    if (sig == NULL) {
        BN_free(r);
        BN_free(s);
    }

    BN_CTX_free(ctx);
    EC_POINT_free(kG);
    return sig;
}

/*
 * Core SM2 signature verification over a precomputed hash value e.
 * Returns 1 if the signature is valid, 0 otherwise.
 */
static int sm2_sig_verify(const EC_KEY *key, const ECDSA_SIG *sig,
                          const BIGNUM *e)
{
    int ret = 0;
    const EC_GROUP *group = EC_KEY_get0_group(key);
    const BIGNUM *order = EC_GROUP_get0_order(group);
    BN_CTX *ctx = NULL;
    EC_POINT *pt = NULL;
    BIGNUM *t = NULL;
    BIGNUM *x1 = NULL;
    const BIGNUM *r = NULL;
    const BIGNUM *s = NULL;

    ctx = BN_CTX_new();
    pt = EC_POINT_new(group);
    if (ctx == NULL || pt == NULL) {
        SM2err(SM2_F_SM2_SIG_VERIFY, ERR_R_MALLOC_FAILURE);
        goto done;
    }

    BN_CTX_start(ctx);
    t = BN_CTX_get(ctx);
    x1 = BN_CTX_get(ctx);
    if (x1 == NULL) {
        SM2err(SM2_F_SM2_SIG_VERIFY, ERR_R_MALLOC_FAILURE);
        goto done;
    }

    /*
     * B1: verify whether r' in [1,n-1], verification failed if not
     * B2: vefify whether s' in [1,n-1], verification failed if not
     * B3: set M'~=ZA || M'
     * B4: calculate e'=Hv(M'~)
     * B5: calculate t = (r' + s') modn, verification failed if t=0
     * B6: calculate the point (x1', y1')=[s']G + [t]PA
     * B7: calculate R=(e'+x1') modn, verfication pass if yes, otherwise failed
     */

    ECDSA_SIG_get0(sig, &r, &s);

    if (BN_cmp(r, BN_value_one()) < 0
            || BN_cmp(s, BN_value_one()) < 0
            || BN_cmp(order, r) <= 0
            || BN_cmp(order, s) <= 0) {
        SM2err(SM2_F_SM2_SIG_VERIFY, SM2_R_BAD_SIGNATURE);
        goto done;
    }

    if (!BN_mod_add(t, r, s, order, ctx)) {
        SM2err(SM2_F_SM2_SIG_VERIFY, ERR_R_BN_LIB);
        goto done;
    }

    if (BN_is_zero(t)) {
        SM2err(SM2_F_SM2_SIG_VERIFY, SM2_R_BAD_SIGNATURE);
        goto done;
    }

    if (!EC_POINT_mul(group, pt, s, EC_KEY_get0_public_key(key), t, ctx)
            || !EC_POINT_get_affine_coordinates(group, pt, x1, NULL, ctx)) {
        SM2err(SM2_F_SM2_SIG_VERIFY, ERR_R_EC_LIB);
        goto done;
    }

    if (!BN_mod_add(t, e, x1, order, ctx)) {
        SM2err(SM2_F_SM2_SIG_VERIFY, ERR_R_BN_LIB);
        goto done;
    }

    if (BN_cmp(r, t) == 0)
        ret = 1;

 done:
    EC_POINT_free(pt);
    BN_CTX_free(ctx);
    return ret;
}

/*
 * Sign |msg| with SM2 using the signer identity |id|.
 * Returns a new ECDSA_SIG (caller frees) or NULL on error.
 */
ECDSA_SIG *sm2_do_sign(const EC_KEY *key,
                       const EVP_MD *digest,
                       const uint8_t *id,
                       const size_t id_len,
                       const uint8_t *msg, size_t msg_len)
{
    BIGNUM *e = NULL;
    ECDSA_SIG *sig = NULL;
#ifdef SM2_BENCHMARK
    int i, rep = 256000;
    clock_t c_start, c_end;
    struct timeval tval_start, tval_end, tval_diff;
    double cpu_time, wall_time;
#endif

    e = sm2_compute_msg_hash(digest, key, id, id_len, msg, msg_len);
    if (e == NULL) {
        /* SM2err already called */
        goto done;
    }

    sig = sm2_sig_gen(key, e);

#ifdef SM2_BENCHMARK
#ifdef SM2_BENCHMARK_PARALLEL
    for (int thr = 1; thr <= 64; thr *= 2) {
        omp_set_num_threads(thr);
        printf("Start SM2 signature sign benchmark: thr %d.\n", thr);
#else
    printf("Start SM2 signature sign benchmark.\n");
#endif
    c_start = clock();
    gettimeofday(&tval_start, NULL);
#ifdef SM2_BENCHMARK_PARALLEL
#pragma omp parallel for schedule(static)
#endif
    for (i = 0; i < rep; i++) {
        /* free each signature: the original leaked one ECDSA_SIG per rep */
        ECDSA_SIG_free(sm2_sig_gen(key, e));
    }
    gettimeofday(&tval_end, NULL);
    c_end = clock() - c_start;
    timersub(&tval_end, &tval_start, &tval_diff);
    cpu_time = (double)c_end / CLOCKS_PER_SEC;
    wall_time = (long)tval_diff.tv_sec + (double)tval_diff.tv_usec / 1000000;
    printf("Benchmark finished. CPU time: %lf s, Wall time: %lf s.\n",
           cpu_time, wall_time);
    printf("Speed is: %lf (CPU) / %lf (Wall) sign/s.\n",
           rep / cpu_time, rep / wall_time);
#ifdef SM2_BENCHMARK_PARALLEL
        rep *= 2;
    }
#endif
#endif

 done:
    BN_free(e);
    return sig;
}

/*
 * Verify an SM2 signature of |msg| under identity |id|.
 * Returns 1 if valid, 0 otherwise.
 */
int sm2_do_verify(const EC_KEY *key,
                  const EVP_MD *digest,
                  const ECDSA_SIG *sig,
                  const uint8_t *id,
                  const size_t id_len,
                  const uint8_t *msg, size_t msg_len)
{
    BIGNUM *e = NULL;
    int ret = 0;
#ifdef SM2_BENCHMARK
    int i, rep = 128000;
    clock_t c_start, c_end;
    struct timeval tval_start, tval_end, tval_diff;
    double cpu_time, wall_time;
#endif

    e = sm2_compute_msg_hash(digest, key, id, id_len, msg, msg_len);
    if (e == NULL) {
        /* SM2err already called */
        goto done;
    }

    ret = sm2_sig_verify(key, sig, e);

#ifdef SM2_BENCHMARK
#ifdef SM2_BENCHMARK_PARALLEL
    for (int thr = 1; thr <= 64; thr *= 2) {
        omp_set_num_threads(thr);
        printf("Start SM2 signature verification benchmark: thr %d.\n", thr);
#else
    printf("Start SM2 signature verification benchmark.");
#endif
    c_start = clock();
    gettimeofday(&tval_start, NULL);
#ifdef SM2_BENCHMARK_PARALLEL
#pragma omp parallel for schedule(static)
#endif
    for (i = 0; i < rep; i++) {
        sm2_sig_verify(key, sig, e);
    }
    gettimeofday(&tval_end, NULL);
    c_end = clock() - c_start;
    timersub(&tval_end, &tval_start, &tval_diff);
    cpu_time = (double)c_end / CLOCKS_PER_SEC;
    wall_time = (long)tval_diff.tv_sec + (double)tval_diff.tv_usec / 1000000;
    printf("Benchmark finished. CPU time: %lf s, Wall time: %lf s.\n",
           cpu_time, wall_time);
    printf("Speed is: %lf (CPU) / %lf (Wall) verify/s.\n",
           rep / cpu_time, rep / wall_time);
#ifdef SM2_BENCHMARK_PARALLEL
        rep *= 2;
    }
#endif
#endif

 done:
    BN_free(e);
    return ret;
}

/*
 * Sign a raw digest and DER-encode the signature into |sig| (i2d semantics).
 * Returns 1 on success, -1 on error.
 */
int sm2_sign(const unsigned char *dgst, int dgstlen,
             unsigned char *sig, unsigned int *siglen, EC_KEY *eckey)
{
    BIGNUM *e = NULL;
    ECDSA_SIG *s = NULL;
    int sigleni;
    int ret = -1;

    e = BN_bin2bn(dgst, dgstlen, NULL);
    if (e == NULL) {
        SM2err(SM2_F_SM2_SIGN, ERR_R_BN_LIB);
        goto done;
    }

    s = sm2_sig_gen(eckey, e);
    if (s == NULL) {
        /*
         * BUGFIX: the original passed a possibly-NULL signature straight to
         * i2d_ECDSA_SIG, which dereferences it (NULL pointer crash when
         * signature generation fails).
         */
        SM2err(SM2_F_SM2_SIGN, ERR_R_INTERNAL_ERROR);
        goto done;
    }

    sigleni = i2d_ECDSA_SIG(s, &sig);
    if (sigleni < 0) {
        SM2err(SM2_F_SM2_SIGN, ERR_R_INTERNAL_ERROR);
        goto done;
    }
    *siglen = (unsigned int)sigleni;

    ret = 1;

 done:
    ECDSA_SIG_free(s);
    BN_free(e);
    return ret;
}

/*
 * Verify a DER-encoded signature over a raw digest.  Rejects signatures
 * that are not minimal DER or carry trailing garbage.
 * Returns 1 if valid, 0 if invalid, -1 on error.
 */
int sm2_verify(const unsigned char *dgst, int dgstlen,
               const unsigned char *sig, int sig_len, EC_KEY *eckey)
{
    ECDSA_SIG *s = NULL;
    BIGNUM *e = NULL;
    const unsigned char *p = sig;
    unsigned char *der = NULL;
    int derlen = -1;
    int ret = -1;

    s = ECDSA_SIG_new();
    if (s == NULL) {
        SM2err(SM2_F_SM2_VERIFY, ERR_R_MALLOC_FAILURE);
        goto done;
    }
    if (d2i_ECDSA_SIG(&s, &p, sig_len) == NULL) {
        SM2err(SM2_F_SM2_VERIFY, SM2_R_INVALID_ENCODING);
        goto done;
    }
    /* Ensure signature uses DER and doesn't have trailing garbage */
    derlen = i2d_ECDSA_SIG(s, &der);
    if (derlen != sig_len || memcmp(sig, der, derlen) != 0) {
        SM2err(SM2_F_SM2_VERIFY, SM2_R_INVALID_ENCODING);
        goto done;
    }

    e = BN_bin2bn(dgst, dgstlen, NULL);
    if (e == NULL) {
        SM2err(SM2_F_SM2_VERIFY, ERR_R_BN_LIB);
        goto done;
    }

    ret = sm2_sig_verify(eckey, s, e);

 done:
    OPENSSL_free(der);
    BN_free(e);
    ECDSA_SIG_free(s);
    return ret;
}
expm_multiply_parallel.h
#ifndef _EXPM_MULTIPLY_H
#define _EXPM_MULTIPLY_H

#include "openmp.h"
#include "csr_matvec.h"
#include <cmath>
#include <algorithm>
#include <complex>
#include <iostream>

// Sum of the diagonal entries of a CSR matrix (Ap/Aj/Ax, n rows, n_col cols).
// NOTE(review): N is the *max* of n and n_col; a matrix trace normally runs
// over min(n, n_col), and when n_col > n this indexes Ap past its last entry
// (Ap has only n+1 elements). Presumably all callers pass square matrices --
// TODO confirm before relying on rectangular inputs.
template <class I, class T>
T csr_trace(const I n, const I n_col, const I Ap[], const I Aj[], const T Ax[])
{
    T trace = 0;
    const I N = (n<n_col?n_col:n);

    for(I i = 0; i < N; i++){
        const I row_start = Ap[i];
        const I row_end = Ap[i+1];

        T diag = 0;
        for(I jj = row_start; jj < row_end; jj++){
            if (Aj[jj] == i) diag += Ax[jj];
        }

        trace += diag;
    }
    return trace;
}

// template<typename T>
// T inline my_max(T norm,T x){
//  T a = x*x;
//  // return (a<norm?norm:a);
//  return std::max(norm,a);
// }

// template<typename T>
// T inline my_max(T norm,std::complex<T> x){
//  T re = x.real();
//  T im = x.imag();
//  T a = re*re+im*im;
//  // return (a<norm?norm:a);
//  return std::max(norm,a);
// }

// Max-magnitude ("infinity") norm of arr[begin..end), real version.
// Squares each entry and takes sqrt of the running max.
template<typename T,typename I>
T inf_norm_chunk(T * arr,I begin,I end){
    T max = 0;
    for(I i=begin;i<end;i++){
        T a = arr[i]*arr[i];
        max = std::max(max,a);
    }
    return std::sqrt(max);
}

// Complex overload: compares squared moduli, returns the largest modulus.
template<typename T,typename I>
T inf_norm_chunk(std::complex<T> * arr,I begin,I end){
    T max = 0;
    for(I i=begin;i<end;i++){
        T re = arr[i].real();
        T im = arr[i].imag();
        T a = re*re+im*im;
        max = std::max(max,a);
    }
    return std::sqrt(max);
}

// Compute F <- exp(a*(A + mu*I)) * F via s sub-steps of a truncated Taylor
// series with at most m_star terms per step (Al-Mohy/Higham style expm
// scheme; s/m_star/tol are chosen by the caller).  B1 and B2 are caller-
// provided n-length work arrays; F is updated in place.
//
// CONCURRENCY NOTE(review): c1/c2/c3/flag are shared reduction scalars.
// Correctness relies on the implicit barriers at the end of each
// `omp single` and `omp for` region to order the zeroing, the per-thread
// `critical` max-accumulation, and the convergence test -- do not
// restructure or add nowait clauses without re-checking that ordering.
template<typename I, typename T1,typename T2,typename T3>
void _expm_multiply(const I n, const I Ap[], const I Aj[], const T1 Ax[],
                    const int s, const int m_star, const T2 tol, const T1 mu,
                    const T3 a, T3 F[], T3 B1[], T3 B2[] )
{
    T2 c1,c2,c3;
    bool flag=false;
    I rco[128];   // work buffers handed to csr_matvec (row/value carry-over)
    T3 vco[128];

    #pragma omp parallel shared(c1,c2,c3,flag,F,B1,B2,rco,vco)
    {
        // manual block partition of [0, n) across the team; the last thread
        // absorbs the n % nthread remainder
        int nthread = omp_get_num_threads();
        int threadn = omp_get_thread_num();

        I items_per_thread = n/nthread;
        I begin = items_per_thread * threadn;
        I end = items_per_thread * ( threadn + 1 );

        if(threadn == nthread-1){
            end += n%nthread;
        }

        // per-step scaling factor exp(a*mu/s) (mu-shift correction)
        T3 eta = std::exp(a*mu/T2(s));

        #pragma omp for schedule(static,items_per_thread)
        for(I k=0;k<n;k++){
            B1[k] = F[k];
            B2[k] = 0;
        }

        for(int i=0;i<s;i++){
            // c1 = ||B1||_inf, reduced across threads via critical max
            T2 c1_thread = inf_norm_chunk(B1,begin,end);

            #pragma omp single
            {
                c1 = 0;
                flag = false;
            }

            #pragma omp critical
            {
                c1 = std::max(c1,c1_thread);
            }

            // Taylor terms j = 1..m_star, early-exit once converged
            for(int j=1;j<m_star+1 && !flag;j++){
                // B2 <- (a/(j*s)) * A * B1 (csr_matvec semantics assumed --
                // presumably overwrites B2 when first arg is true; confirm)
                csr_matvec(true,n,Ap,Aj,Ax,a/T2(j*s),B1,rco,vco,B2);

                #pragma omp for schedule(static,items_per_thread)
                for(I k=0;k<n;k++){
                    F[k] += B1[k] = B2[k];
                }

                T2 c2_thread = inf_norm_chunk(B2,begin,end);
                T2 c3_thread = inf_norm_chunk(F,begin,end);

                #pragma omp single
                {
                    c2 = c3 = 0;
                }

                #pragma omp critical
                {
                    c2 = std::max(c2,c2_thread);
                    c3 = std::max(c3,c3_thread);
                }
                #pragma omp barrier
                #pragma omp single
                {
                    // stop when the last two term norms are small vs ||F||
                    if((c1+c2)<=(tol*c3)){
                        flag=true;
                    }
                    c1 = c2;
                }
            }

            // apply the mu-shift factor and seed the next sub-step
            #pragma omp for schedule(static,items_per_thread)
            for(I k=0;k<n;k++){
                F[k] *= eta;
                B1[k] = F[k];
            }
        }
    }
}

#endif
GB_select_phase2.c
//------------------------------------------------------------------------------
// GB_select_phase2: C=select(A,thunk)
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// This is a template body: it is #include'd into selector-specific kernels
// with exactly one GB_*_SELECTOR macro defined, after phase 1 has computed
// the output pointers Cp (and Zp/Cp_kfirst for the positional selectors).

{

    //--------------------------------------------------------------------------
    // get A
    //--------------------------------------------------------------------------

    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ah = A->h ;
    const int64_t *restrict Ai = A->i ;
    // if A is iso and the op is user-defined, Ax [0] is passed to the user
    // selectop
    const GB_ATYPE *restrict Ax = (GB_ATYPE *) A->x ;
    size_t asize = A->type->size ;
    int64_t avlen = A->vlen ;
    int64_t avdim = A->vdim ;

    // if A is bitmap, the bitmap selector is always used instead
    ASSERT (!GB_IS_BITMAP (A)) ;
    #ifndef GB_DIAG_SELECTOR
    // if A is full, all opcodes except DIAG use the bitmap selector instead
    ASSERT (!GB_IS_FULL (A)) ;
    #endif

    // A_ek_slicing packs three parallel arrays of length A_ntasks
    const int64_t *restrict kfirst_Aslice = A_ek_slicing ;
    const int64_t *restrict klast_Aslice  = A_ek_slicing + A_ntasks ;
    const int64_t *restrict pstart_Aslice = A_ek_slicing + A_ntasks * 2 ;

    //--------------------------------------------------------------------------
    // C = select (A)
    //--------------------------------------------------------------------------

    int tid ;
    #pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < A_ntasks ; tid++)
    {

        // if kfirst > klast then task tid does no work at all
        int64_t kfirst = kfirst_Aslice [tid] ;
        int64_t klast  = klast_Aslice  [tid] ;

        //----------------------------------------------------------------------
        // selection from vectors kfirst to klast
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // find the part of A(:,k) to be operated on by this task
            //------------------------------------------------------------------

            int64_t pA_start, pA_end, pC ;
            GB_get_pA_and_pC (&pA_start, &pA_end, &pC, tid, k, kfirst,
                klast, pstart_Aslice, Cp_kfirst, Cp, avlen, Ap, avlen) ;

            //------------------------------------------------------------------
            // compact Ai and Ax [pA_start ... pA_end-1] into Ci and Cx
            //------------------------------------------------------------------

            #if defined ( GB_ENTRY_SELECTOR )

                // value-based selector: test each entry individually
                int64_t j = GBH (Ah, k) ;
                for (int64_t pA = pA_start ; pA < pA_end ; pA++)
                {
                    // A is never full; that case is now handled by the
                    // bitmap selector instead.
                    ASSERT (Ai != NULL) ;
                    int64_t i = Ai [pA] ;
                    GB_TEST_VALUE_OF_ENTRY (keep, pA) ;
                    if (keep)
                    {
                        ASSERT (pC >= Cp [k] && pC < Cp [k+1]) ;
                        Ci [pC] = i ;
                        // Cx [pC] = Ax [pA] ;
                        GB_SELECT_ENTRY (Cx, pC, Ax, pA) ;
                        pC++ ;
                    }
                }

            #elif defined ( GB_TRIL_SELECTOR ) \
               || defined ( GB_ROWGT_SELECTOR )

                // positional: keep the contiguous tail Zp [k] to pA_end-1
                int64_t p = GB_IMAX (Zp [k], pA_start) ;
                int64_t mynz = pA_end - p ;
                if (mynz > 0)
                {
                    // A and C are both sparse or hypersparse
                    ASSERT (pA_start <= p && p + mynz <= pA_end) ;
                    ASSERT (pC >= Cp [k] && pC + mynz <= Cp [k+1]) ;
                    ASSERT (Ai != NULL) ;
                    memcpy (Ci +pC, Ai +p, mynz*sizeof (int64_t)) ;
                    #if !GB_ISO_SELECT
                    memcpy (Cx +pC*asize, Ax +p*asize, mynz*asize) ;
                    #endif
                }

            #elif defined ( GB_TRIU_SELECTOR ) \
               || defined ( GB_ROWLE_SELECTOR )

                // positional: keep the contiguous head pA_start to Zp[k]-1
                int64_t p = GB_IMIN (Zp [k], pA_end) ;
                int64_t mynz = p - pA_start ;
                if (mynz > 0)
                {
                    // A and C are both sparse or hypersparse
                    ASSERT (pC >= Cp [k] && pC + mynz <= Cp [k+1]) ;
                    ASSERT (Ai != NULL) ;
                    memcpy (Ci +pC, Ai +pA_start, mynz*sizeof (int64_t)) ;
                    #if !GB_ISO_SELECT
                    memcpy (Cx +pC*asize, Ax +pA_start*asize, mynz*asize) ;
                    #endif
                }

            #elif defined ( GB_DIAG_SELECTOR )

                // task that owns the diagonal entry does this work
                // A can be sparse or full, but not bitmap
                int64_t p = Zp [k] ;
                if (pA_start <= p && p < pA_end)
                {
                    ASSERT (pC >= Cp [k] && pC + 1 <= Cp [k+1]) ;
                    Ci [pC] = GBI (Ai, p, avlen) ;
                    #if !GB_ISO_SELECT
                    memcpy (Cx +pC*asize, Ax +p*asize, asize) ;
                    #endif
                }

            #elif defined ( GB_OFFDIAG_SELECTOR ) \
               || defined ( GB_ROWINDEX_SELECTOR )

                // positional: keep everything except the Zp[k] "pivot",
                // i.e. two contiguous runs around it

                // keep pA_start to Zp[k]-1
                int64_t p = GB_IMIN (Zp [k], pA_end) ;
                int64_t mynz = p - pA_start ;
                if (mynz > 0)
                {
                    // A and C are both sparse or hypersparse
                    ASSERT (pC >= Cp [k] && pC + mynz <= Cp [k+1]) ;
                    ASSERT (Ai != NULL) ;
                    memcpy (Ci +pC, Ai +pA_start, mynz*sizeof (int64_t)) ;
                    #if !GB_ISO_SELECT
                    memcpy (Cx +pC*asize, Ax +pA_start*asize, mynz*asize) ;
                    #endif
                    pC += mynz ;
                }

                // keep Zp[k]+1 to pA_end-1
                p = GB_IMAX (Zp [k]+1, pA_start) ;
                mynz = pA_end - p ;
                if (mynz > 0)
                {
                    // A and C are both sparse or hypersparse
                    ASSERT (pA_start <= p && p < pA_end) ;
                    ASSERT (pC >= Cp [k] && pC + mynz <= Cp [k+1]) ;
                    ASSERT (Ai != NULL) ;
                    memcpy (Ci +pC, Ai +p, mynz*sizeof (int64_t)) ;
                    #if !GB_ISO_SELECT
                    memcpy (Cx +pC*asize, Ax +p*asize, mynz*asize) ;
                    #endif
                }
            #endif
        }
    }
}
yael_fisher_elem.c
/* Copyright © INRIA 2009-2014.
   Authors: Matthijs Douze & Herve Jegou
   Contact: matthijs.douze@inria.fr herve.jegou@inria.fr

   This file is part of Yael.

   Yael is free software: you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   Yael is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with Yael. If not, see <http://www.gnu.org/licenses/>.
*/

/* *** Not tested yet on an image set *** */

#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <sys/time.h>

#include <yael/vector.h>
#include <yael/gmm.h>
#include <yael/machinedeps.h>

#include "mex.h"

/* positional input arguments of the MEX gateway */
#define PARAM_V prhs[0]
#define PARAM_W prhs[1]
#define PARAM_MU prhs[2]
#define PARAM_SIGMA prhs[3]

/*
 * MEX gateway: vf = yael_fisher_elem(v, w, mu, sigma, [options...])
 *
 * v      d x n single matrix: n descriptors of dimension d, one Fisher
 *        vector is produced per column
 * w      1 x k (or k x 1) single: GMM mixture weights
 * mu     d x k single: GMM means
 * sigma  d x k single: GMM diagonal variances
 *
 * Optional trailing string arguments: "sigma", "weights", "nomu",
 * "verbose", "nonorm" (see the parsing loop below).
 * Output: dout x n single matrix of Fisher vectors, dout given by
 * gmm_fisher_sizeof for the selected flags.
 */
void mexFunction (int nlhs, mxArray *plhs[],
                  int nrhs, const mxArray*prhs[])
{
  int i;

  if (nrhs < 4)
    mexErrMsgTxt("At least 4 arguments are required even nb of input arguments required.");
  else if (nlhs != 1)
    mexErrMsgTxt("yael_fisher produces exactly 1 output argument.");

  int flags = GMM_FLAGS_MU;  /* default: gradient w.r.t. means only */
  int verbose = 0;
  /* NOTE(review): fishernorm1 is toggled by the "nonorm" option below but
     is never read afterwards -- presumably a normalization flag (or a
     post-normalization step) was meant to depend on it; TODO confirm
     against the reference yael_fisher implementation. */
  int fishernorm1 = 1;

  /* all four numeric inputs must be single precision */
  if(mxGetClassID(PARAM_V)!=mxSINGLE_CLASS)
    mexErrMsgTxt("need single precision array.");

  if(mxGetClassID(PARAM_W)!=mxSINGLE_CLASS)
    mexErrMsgTxt("need single precision array.");

  if(mxGetClassID(PARAM_MU)!=mxSINGLE_CLASS)
    mexErrMsgTxt("need single precision array.");

  if(mxGetClassID(PARAM_SIGMA)!=mxSINGLE_CLASS)
    mexErrMsgTxt("need single precision array.");

  float *v = (float*) mxGetPr (PARAM_V);
  float *w = (float*) mxGetPr (PARAM_W);
  float *mu = (float*) mxGetPr (PARAM_MU);
  float *sigma = (float*) mxGetPr (PARAM_SIGMA);

  {
    /* parse the optional string flags (arguments 5..nrhs) */
    int i;
    for(i = 4 ; i < nrhs ; i += 1) {
      char varname[256];
      if (mxGetClassID(prhs[i]) != mxCHAR_CLASS)
        mexErrMsgTxt ("variable name required");

      if (mxGetString (prhs[i], varname, 256) != 0)
        mexErrMsgTxt ("Could not convert string data");

      if (!strcmp(varname, "sigma")) flags |= GMM_FLAGS_SIGMA;

      else if (!strcmp(varname,"weights")) flags |= GMM_FLAGS_W;

      else if (!strcmp(varname,"nomu")) flags &= ~ GMM_FLAGS_MU;

      else if (!strcmp(varname,"verbose")) verbose = 1;

      else if (!strcmp(varname,"nonorm")) fishernorm1 = 0;

      else mexErrMsgTxt("unknown variable name");
    }
  }

  if (verbose) {
    fprintf (stdout, "v -> %ld x %ld\n", mxGetM (PARAM_V), mxGetN (PARAM_V));
    fprintf (stdout, "w -> %ld x %ld\n", mxGetM (PARAM_W), mxGetN (PARAM_W));
    fprintf (stdout, "mu -> %ld x %ld\n", mxGetM (PARAM_MU), mxGetN (PARAM_MU));
    fprintf (stdout, "sigma -> %ld x %ld\n", mxGetM (PARAM_SIGMA), mxGetN (PARAM_SIGMA));
  }

  int d = mxGetM (PARAM_V);      /* vector dimensionality */
  int n = mxGetN (PARAM_V);      /* number of fisher vector to produce */
  int k = mxGetN (PARAM_W);      /* number of gaussian */

  if (verbose)
    fprintf (stdout, "d = %d\nn = %d\nk = %d\n", d, n, k);

  if (mxGetM (PARAM_MU) != d || mxGetM (PARAM_SIGMA) != d
      || mxGetN (PARAM_MU) !=k || mxGetN (PARAM_SIGMA) != k
      || (mxGetM (PARAM_W) != 1 && mxGetN (PARAM_W) != 1) )
    mexErrMsgTxt("Invalid input dimensionalities.");

  /* ouptut: GMM, i.e., weights, mu and variances */
  /* build a stack gmm_t that borrows the MATLAB-owned buffers */
  gmm_t g = {d, k, w, mu, sigma};

  int dout = gmm_fisher_sizeof (&g, flags);
  if (verbose)
    fprintf (stdout, "Size of the fisher vector = %d\n", dout);

  plhs[0] = mxCreateNumericMatrix (dout, n, mxSINGLE_CLASS, mxREAL);
  float * vf = (float *) mxGetPr (plhs[0]);

  /* one independent Fisher vector per input column */
#pragma omp parallel for private (i)
  for (i = 0 ; i < n ; i++) {
    gmm_fisher (1, v + i * d, &g, flags, vf + i * dout);
  }
}
sudoku.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>

// Sudoku solver using the "naked single" strategy, OpenMP-parallel.
// May 2018: Sven Bingert, Triet Doan

// ------------------------------------------------------
// Print the n x m sudoku grid on screen.
// ------------------------------------------------------
void printsudoku(int n, int m, int field[n][m]) {
  printf("\n");
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < m; j++) {
      printf("%2d ", field[i][j]);
    }
    printf("\n");
  }
}

// ------------------------------------------------------
// Search for a fitting number in cell (i, j).
// Candidates k = 1..n are tested in parallel against the row,
// the column and the sqrt(n) x sqrt(n) region; if exactly one
// candidate survives, it is written into the cell.
// ------------------------------------------------------
void testnumber(int n, int m, int field[n][m], int i, int j) {
  int k_success;
  int k_tobeset;
  int n_numbers = 0;
  int mb, nb;
  int ldim = (int)sqrt((double)n);  // region edge length (assumes n is a perfect square)
  // for some reason unable to use default(none) here
#pragma omp parallel for shared(ldim, k_tobeset, m, n, field, i, j) \
    private(k_success, mb, nb) reduction(+ : n_numbers)
  for (int k = 1; k < n + 1; k++) {
    k_success = 1;
    // candidate already present in row i?
    for (int l = 0; l < n; l++) {
      if (field[i][l] == k) {
        k_success = 0;
      }
    }
    // candidate already present in column j?
    for (int l = 0; l < m; l++) {
      if (field[l][j] == k) {
        k_success = 0;
      }
    }
    // candidate already present in the region?
    nb = i - i % ldim;
    mb = j - j % ldim;
    for (int l1 = nb; l1 < nb + ldim; l1++) {
      for (int l2 = mb; l2 < mb + ldim; l2++) {
        if (field[l1][l2] == k) {
          k_success = 0;
        }
      }
    }
    if (k_success == 1) {
      // The value is only consumed when n_numbers == 1, i.e. when a single
      // thread performed this write, so the unsynchronized store is benign.
      k_tobeset = k;
    }
    n_numbers = n_numbers + k_success;
  }
  // Success: exactly one candidate fits -> place it.
  if (n_numbers == 1) {
    field[i][j] = k_tobeset;
  }
}

// ------------------------------------------------------
// Main Function
// ------------------------------------------------------
int main(int argc, const char *argv[]) {
  int n, m;
  FILE *file;
  int minval;

  // Read dimensions and the sudoku from file
  // (input.file / input2.file, comma-separated 2-digit fields).
  file = fopen("input2.file", "r");
  if (file == NULL) {  // FIX: fopen result was previously unchecked
    perror("input2.file");
    return 1;
  }
  if (fscanf(file, "%2d,", &n) != 1 || fscanf(file, "%2d,", &m) != 1) {
    // FIX: fscanf results were previously unchecked
    fprintf(stderr, "invalid dimension header in input file\n");
    fclose(file);
    return 1;
  }
  int field[n][m];
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < m; j++) {
      if (fscanf(file, "%2d,", &field[i][j]) != 1) {
        fprintf(stderr, "invalid cell at (%d,%d)\n", i, j);
        fclose(file);
        return 1;
      }
    }
  }
  printsudoku(n, m, field);
  fclose(file);

  // Main loop: each empty (zero) cell is tested until the grid is full.
  double start = omp_get_wtime();
  minval = 0;
  int prev_zeros = n * m + 1;  // sentinel, larger than any possible count
  while (minval == 0) {
    for (int i = 0; i < n; i++) {
      for (int j = 0; j < m; j++) {
        if (field[i][j] == 0) {
          testnumber(n, m, field, i, j);
        }
      }
    }
    //
    // The sudoku is successfully filled when the array contains no zeros.
    // Counting with a reduction replaces the original atomic-write flag.
    //
    int zeros = 0;
#pragma omp parallel for default(none) shared(n, m, field) reduction(+ : zeros) schedule(guided)
    for (int i = 0; i < n; i++) {
      for (int j = 0; j < m; j++) {
        if (field[i][j] == 0) {
          zeros++;
        }
      }
    }
    if (zeros == 0) {
      minval = 1;
    } else if (zeros == prev_zeros) {
      // FIX: a full pass placed no number, so the naked-single strategy is
      // stuck and the original loop would have spun forever.
      fprintf(stderr, "no further progress, %d cells left unsolved\n", zeros);
      break;
    }
    prev_zeros = zeros;
  }
  double duration = omp_get_wtime() - start;

  printsudoku(n, m, field);
  printf("%f\n", duration);
  return 0;
}
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 24; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 
Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,3);t1++) { lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6)); ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-1,2)),ceild(24*t2-Nz-11,24));t3<=min(min(min(floord(4*Nt+Ny-9,24),floord(12*t1+Ny+15,24)),floord(24*t2+Ny+11,24)),floord(24*t1-24*t2+Nz+Ny+13,24));t3++) { for (t4=max(max(max(max(0,ceild(3*t1-3*t2-126,128)),ceild(3*t1-254,256)),ceild(24*t2-Nz-1011,1024)),ceild(24*t3-Ny-1011,1024));t4<=min(min(min(min(floord(4*Nt+Nx-9,1024),floord(12*t1+Nx+15,1024)),floord(24*t2+Nx+11,1024)),floord(24*t3+Nx+11,1024)),floord(24*t1-24*t2+Nz+Nx+13,1024));t4++) { for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),6*t3+4),256*t4+254);t5++) { for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) { lbv=max(1024*t4,4*t5+4); ubv=min(1024*t4+1023,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 
2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; 
i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
fac_amr_fcoarsen.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision$ ***********************************************************************EHEADER*/ /****************************************************************************** * OpenMP Problems * * Need to fix the way these variables are set and incremented in loops: * vals * ******************************************************************************/ #include "_hypre_sstruct_ls.h" #include "fac.h" #define MapStencilRank(stencil, rank) \ { \ HYPRE_Int ii,jj,kk; \ ii = hypre_IndexX(stencil); \ jj = hypre_IndexY(stencil); \ kk = hypre_IndexZ(stencil); \ if (ii==-1) \ ii=2; \ if (jj==-1) \ jj=2; \ if (kk==-1) \ kk=2; \ rank = ii + 3*jj + 9*kk; \ } #define InverseMapStencilRank(rank, stencil) \ { \ HYPRE_Int ij,ii,jj,kk; \ ij = (rank%9); \ ii = (ij%3); \ jj = (ij-ii)/3; \ kk = (rank-3*jj-ii)/9; \ if (ii==2) \ ii= -1; \ if (jj==2) \ jj= -1; \ if (kk==2) \ kk= -1; \ hypre_SetIndex3(stencil, ii, jj, kk); \ } #define AbsStencilShape(stencil, abs_shape) \ { \ HYPRE_Int ii,jj,kk; \ ii = hypre_IndexX(stencil); \ jj = hypre_IndexY(stencil); \ kk = hypre_IndexZ(stencil); \ abs_shape= hypre_abs(ii) + hypre_abs(jj) + hypre_abs(kk); \ } /*-------------------------------------------------------------------------- * hypre_AMR_FCoarsen: Coarsen the fbox and f/c connections. Forms the * coarse operator by averaging neighboring connections in the refinement * patch. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMR_FCoarsen( hypre_SStructMatrix * A, hypre_SStructMatrix * fac_A, hypre_SStructPMatrix * A_crse, hypre_Index refine_factors, HYPRE_Int level ) { hypre_Box fine_box; hypre_Box intersect_box; MPI_Comm comm = hypre_SStructMatrixComm(A); hypre_SStructGraph *graph = hypre_SStructMatrixGraph(A); HYPRE_Int graph_type = hypre_SStructGraphObjectType(graph); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); HYPRE_IJMatrix ij_A = hypre_SStructMatrixIJMatrix(A); HYPRE_Int matrix_type= hypre_SStructMatrixObjectType(A); HYPRE_Int ndim = hypre_SStructMatrixNDim(A); hypre_SStructPMatrix *A_pmatrix = hypre_SStructMatrixPMatrix(fac_A, level); hypre_StructMatrix *smatrix_var; hypre_StructStencil *stencils, *stencils_last; HYPRE_Int stencil_size, stencil_last_size; hypre_Index stencil_shape_i, stencil_last_shape_i; hypre_Index loop_size; hypre_Box loop_box; HYPRE_Real **a_ptrs; hypre_Box *A_dbox; HYPRE_Int part_crse= level-1; HYPRE_Int part_fine= level; hypre_StructMatrix *crse_smatrix; HYPRE_Real *crse_ptr; HYPRE_Real **crse_ptrs; hypre_Box *crse_dbox; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; hypre_Box *cgrid_box; hypre_Index cstart; hypre_Index fstart, fend; hypre_Index stridec, stridef; hypre_StructGrid *fgrid; hypre_BoxArray *fgrid_boxes; hypre_Box *fgrid_box; hypre_BoxArray ***fgrid_crse_extents; hypre_BoxArray ***fbox_interior; hypre_BoxArrayArray ***fbox_bdy; HYPRE_Int ***interior_fboxi; HYPRE_Int ***bdy_fboxi; HYPRE_Int ***cboxi_fboxes; HYPRE_Int **cboxi_fcnt; hypre_BoxArray *fbox_interior_ci, *fbox_bdy_ci_fi; hypre_BoxArrayArray *fbox_bdy_ci; HYPRE_Int *interior_fboxi_ci; HYPRE_Int *bdy_fboxi_ci; HYPRE_Int centre; hypre_BoxArray *data_space; HYPRE_Int ci, fi, arrayi; HYPRE_Int max_stencil_size= 27; HYPRE_Int trueV = 1; HYPRE_Int falseV= 0; HYPRE_Int found, sort; HYPRE_Int stencil_marker; HYPRE_Int *stencil_ranks, *rank_stencils; HYPRE_Int *stencil_contrib_cnt; 
HYPRE_Int **stencil_contrib_i; HYPRE_Real **weight_contrib_i; HYPRE_Real weights[4]= {1.0, 0.25, 0.125, 0.0625}; HYPRE_Real sum; HYPRE_Int abs_stencil_shape; hypre_Box **shift_box; hypre_Box coarse_cell_box; HYPRE_Int volume_coarse_cell_box; HYPRE_Int *volume_shift_box; HYPRE_Int max_contribut_size, stencil_i, rank; HYPRE_Int startrank; HYPRE_Real *vals, *vals2; HYPRE_Int i, j, k, l, m, n, ll, kk, jj; HYPRE_Int nvars, var1, var2, var2_start; HYPRE_Int iA, iAc, iA_shift_z, iA_shift_zy, iA_shift_zyx; hypre_Index lindex; hypre_Index index1, index2; hypre_Index index_temp; HYPRE_Int **box_graph_indices; HYPRE_Int *box_graph_cnts; HYPRE_Int *box_ranks, *box_ranks_cnt, *box_to_ranks_cnt; HYPRE_Int *cdata_space_ranks, *box_starts, *box_ends; HYPRE_Int *box_connections; HYPRE_Int **coarse_contrib_Uv; HYPRE_Int *fine_interface_ranks; HYPRE_Int nUventries= hypre_SStructGraphNUVEntries(graph); HYPRE_Int *iUventries = hypre_SStructGraphIUVEntries(graph); hypre_SStructUVEntry **Uventries = hypre_SStructGraphUVEntries(graph); hypre_SStructUVEntry *Uventry; HYPRE_Int nUentries, cnt1; hypre_Index index, *cindex, *Uv_cindex; HYPRE_Int box_array_size, cbox_array_size; HYPRE_Int nrows, to_rank; HYPRE_Int *ncols, *rows, *cols; HYPRE_Int **interface_max_stencil_ranks; HYPRE_Int **interface_max_stencil_cnt; HYPRE_Int **interface_rank_stencils; HYPRE_Int **interface_stencil_ranks; HYPRE_Int *coarse_stencil_cnt; HYPRE_Real *stencil_vals; HYPRE_Int *common_rank_stencils, *common_stencil_ranks; HYPRE_Int *common_stencil_i; hypre_BoxManEntry *boxman_entry; HYPRE_Int *temp1, *temp2; HYPRE_Real *temp3; HYPRE_Real sum_contrib, scaling; HYPRE_Int **OffsetA; HYPRE_Int *parents; HYPRE_Int *parents_cnodes; HYPRE_Int myid; hypre_MPI_Comm_rank(comm, &myid); hypre_BoxInit(&fine_box, ndim); hypre_BoxInit(&intersect_box, ndim); hypre_BoxInit(&loop_box, ndim); hypre_BoxInit(&coarse_cell_box, ndim); /*-------------------------------------------------------------------------- * Task: Coarsen the fbox and 
f/c connections to form the coarse grid * operator inside the fgrid. *--------------------------------------------------------------------------*/ if (graph_type == HYPRE_SSTRUCT) { startrank = hypre_SStructGridGhstartRank(grid); } if (graph_type == HYPRE_PARCSR) { startrank = hypre_SStructGridStartRank(grid); } /*-------------------------------------------------------------------------- * Fine grid strides by the refinement factors. *--------------------------------------------------------------------------*/ hypre_SetIndex3(stridec, 1, 1, 1); for (i= 0; i< ndim; i++) { stridef[i]= refine_factors[i]; } for (i= ndim; i< 3; i++) { stridef[i]= 1; } /*-------------------------------------------------------------------------- * Scaling for averaging row sum. *--------------------------------------------------------------------------*/ scaling= 1.0; for (i= 0; i< ndim-2; i++) { scaling*= refine_factors[0]; } /*-------------------------------------------------------------------------- * Determine the coarsened fine grid- fgrid_crse_extents. * These are between fpart= level and cpart= (level-1). The * fgrid_crse_extents will be indexed by cboxes- the boxarray of coarsened * fboxes FULLY in a given cbox. * * Also, determine the interior and boundary boxes of each fbox. Having * these will allow us to determine the f/c interface nodes without * extensive checking. These are also indexed by the cboxes. * fgrid_interior- for each cbox, we have a collection of child fboxes, * each leading to an interior=> boxarray * fgrid_bdy - for each cbox, we have a collection of child fboxes, * each leading to a boxarray of bdies=> boxarrayarray. * Because we need to know the fbox id for these boxarray/boxarrayarray, * we will need one for each fbox. * * And, determine which cboxes contain a given fbox. That is, given a * fbox, find all cboxes that contain a chunk of it. 
*--------------------------------------------------------------------------*/ nvars = hypre_SStructPMatrixNVars(A_pmatrix); fgrid_crse_extents = hypre_TAlloc(hypre_BoxArray **, nvars); fbox_interior = hypre_TAlloc(hypre_BoxArray **, nvars); fbox_bdy = hypre_TAlloc(hypre_BoxArrayArray **, nvars); interior_fboxi = hypre_TAlloc(HYPRE_Int **, nvars); bdy_fboxi = hypre_TAlloc(HYPRE_Int **, nvars); cboxi_fboxes = hypre_TAlloc(HYPRE_Int **, nvars); cboxi_fcnt = hypre_TAlloc(HYPRE_Int *, nvars); for (var1= 0; var1< nvars; var1++) { cgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_crse), var1); cgrid_boxes= hypre_StructGridBoxes(cgrid); fgrid_crse_extents[var1]= hypre_TAlloc(hypre_BoxArray *, hypre_BoxArraySize(cgrid_boxes)); fbox_interior[var1]= hypre_TAlloc(hypre_BoxArray *, hypre_BoxArraySize(cgrid_boxes)); fbox_bdy[var1] = hypre_TAlloc(hypre_BoxArrayArray *, hypre_BoxArraySize(cgrid_boxes)); interior_fboxi[var1]= hypre_TAlloc(HYPRE_Int *, hypre_BoxArraySize(cgrid_boxes)); bdy_fboxi[var1] = hypre_TAlloc(HYPRE_Int *, hypre_BoxArraySize(cgrid_boxes)); fgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_pmatrix), var1); fgrid_boxes= hypre_StructGridBoxes(fgrid); cboxi_fboxes[var1]= hypre_CTAlloc(HYPRE_Int *, hypre_BoxArraySize(fgrid_boxes)); cboxi_fcnt[var1] = hypre_CTAlloc(HYPRE_Int , hypre_BoxArraySize(fgrid_boxes)); /*----------------------------------------------------------------------- * Determine the fine grid boxes that are underlying a coarse grid box. * Coarsen the indices to determine the looping extents of these * boxes. Also, find the looping extents for the extended coarsened * boxes, and the interior and boundary extents of a fine_grid box. * The fine_grid boxes must be adjusted so that only the coarse nodes * inside these boxes are included. Only the lower bound needs to be * adjusted. 
*-----------------------------------------------------------------------*/ hypre_ForBoxI(ci, cgrid_boxes) { cgrid_box= hypre_BoxArrayBox(cgrid_boxes, ci); hypre_CopyIndex(hypre_BoxIMin(cgrid_box), cstart); cnt1= 0; temp1= hypre_CTAlloc(HYPRE_Int, hypre_BoxArraySize(fgrid_boxes)); hypre_ClearIndex(index_temp); hypre_ForBoxI(fi, fgrid_boxes) { fgrid_box= hypre_BoxArrayBox(fgrid_boxes, fi); hypre_CopyIndex(hypre_BoxIMin(fgrid_box), fstart); for (i= 0; i< ndim; i++) { j= fstart[i]%refine_factors[i]; if (j) { fstart[i]+= refine_factors[i] - j; } } hypre_StructMapFineToCoarse(fstart, index_temp, refine_factors, hypre_BoxIMin(&fine_box)); hypre_StructMapFineToCoarse(hypre_BoxIMax(fgrid_box), index_temp, refine_factors, hypre_BoxIMax(&fine_box)); hypre_IntersectBoxes(&fine_box, cgrid_box, &intersect_box); if (hypre_BoxVolume(&intersect_box) > 0) { temp1[cnt1++]= fi; } } fgrid_crse_extents[var1][ci]= hypre_BoxArrayCreate(cnt1, ndim); fbox_interior[var1][ci] = hypre_BoxArrayCreate(cnt1, ndim); fbox_bdy[var1][ci] = hypre_BoxArrayArrayCreate(cnt1, ndim); interior_fboxi[var1][ci] = hypre_CTAlloc(HYPRE_Int, cnt1); bdy_fboxi[var1][ci] = hypre_CTAlloc(HYPRE_Int, cnt1); for (fi= 0; fi< cnt1; fi++) { fgrid_box= hypre_BoxArrayBox(fgrid_boxes, temp1[fi]); hypre_CopyIndex(hypre_BoxIMin(fgrid_box), fstart); hypre_CopyIndex(hypre_BoxIMax(fgrid_box), fend); /*-------------------------------------------------------------------- * record which sides will be adjusted- fstart adjustments will * decrease the box size, whereas fend adjustments will increase the * box size. Since we fstart decreases the box size, we cannot * have an f/c interface at an adjusted fstart end. fend may * correspond to an f/c interface whether it has been adjusted or not. 
*--------------------------------------------------------------------*/ hypre_SetIndex3(index1, 1, 1, 1); for (i= 0; i< ndim; i++) { j= fstart[i]%refine_factors[i]; if (j) { fstart[i]+= refine_factors[i] - j; index1[i] = 0; } j= fend[i]%refine_factors[i]; if (refine_factors[i]-1 - j) { fend[i] +=(refine_factors[i]-1) - j; } } hypre_StructMapFineToCoarse(fstart, index_temp, refine_factors, hypre_BoxIMin(&fine_box)); hypre_StructMapFineToCoarse(hypre_BoxIMax(fgrid_box), index_temp, refine_factors, hypre_BoxIMax(&fine_box)); hypre_IntersectBoxes(&fine_box, cgrid_box, &intersect_box); hypre_CopyBox(&intersect_box, hypre_BoxArrayBox(fgrid_crse_extents[var1][ci], fi)); /*-------------------------------------------------------------------- * adjust the fine intersect_box so that we get the interior and * boundaries separately. *--------------------------------------------------------------------*/ hypre_StructMapCoarseToFine(hypre_BoxIMin(&intersect_box), index_temp, refine_factors, hypre_BoxIMin(&fine_box)); /* the following index2 shift for ndim<3 is no problem since refine_factors[j]= 1 for j>=ndim. */ hypre_SetIndex3(index2, refine_factors[0]-1, refine_factors[1]-1, refine_factors[2]-1); hypre_StructMapCoarseToFine(hypre_BoxIMax(&intersect_box), index2, refine_factors, hypre_BoxIMax(&fine_box)); hypre_SetIndex3(index2, 1, 1, 1); hypre_CopyBox(&fine_box, &loop_box); for (i= 0; i< ndim; i++) { hypre_BoxIMin(&loop_box)[i]+= refine_factors[i]*index1[i]; hypre_BoxIMax(&loop_box)[i]-= refine_factors[i]*index2[i]; } hypre_CopyBox(&loop_box, hypre_BoxArrayBox(fbox_interior[var1][ci], fi)); interior_fboxi[var1][ci][fi]= temp1[fi]; hypre_SubtractBoxes(&fine_box, &loop_box, hypre_BoxArrayArrayBoxArray(fbox_bdy[var1][ci], fi)); bdy_fboxi[var1][ci][fi]= temp1[fi]; } hypre_TFree(temp1); } /* hypre_ForBoxI(ci, cgrid_boxes) */ /*-------------------------------------------------------------------- * Determine the cboxes that contain a chunk of a given fbox. 
*--------------------------------------------------------------------*/ hypre_ForBoxI(fi, fgrid_boxes) { fgrid_box= hypre_BoxArrayBox(fgrid_boxes, fi); hypre_CopyIndex(hypre_BoxIMin(fgrid_box), fstart); for (i= 0; i< ndim; i++) { j= fstart[i]%refine_factors[i]; if (j) { fstart[i]+= refine_factors[i] - j; } } hypre_StructMapFineToCoarse(fstart, index_temp, refine_factors, hypre_BoxIMin(&fine_box)); hypre_StructMapFineToCoarse(hypre_BoxIMax(fgrid_box), index_temp, refine_factors, hypre_BoxIMax(&fine_box)); temp1= hypre_CTAlloc(HYPRE_Int, hypre_BoxArraySize(cgrid_boxes)); hypre_ForBoxI(i, cgrid_boxes) { cgrid_box= hypre_BoxArrayBox(cgrid_boxes, i); hypre_IntersectBoxes(&fine_box, cgrid_box, &intersect_box); if (hypre_BoxVolume(&intersect_box) > 0) { temp1[cboxi_fcnt[var1][fi]]= i; cboxi_fcnt[var1][fi]++; } } cboxi_fboxes[var1][fi]= hypre_TAlloc(HYPRE_Int, cboxi_fcnt[var1][fi]); for (i= 0; i< cboxi_fcnt[var1][fi]; i++) { cboxi_fboxes[var1][fi][i]= temp1[i]; } hypre_TFree(temp1); } } /* for (var1= 0; var1< nvars; var1++) */ /*-------------------------------------------------------------------------- * STEP 1: * COMPUTE THE COARSE LEVEL OPERATOR INSIDE OF A REFINED BOX. * * We assume that the coarse and fine grid variables are of the same type. * * Coarse stencils in the refinement patches are obtained by averaging the * fine grid coefficients. Since we are assuming cell-centred discretization, * we apply a weighted averaging of ONLY the fine grid coefficients along * interfaces of adjacent agglomerated coarse cells. * * Since the stencil pattern is assumed arbitrary, we must determine the * stencil pattern of each var1-var2 struct_matrix to get the correct * contributing stencil coefficients, averaging weights, etc. *--------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------- * Agglomerated coarse cell info. 
These are needed in defining the looping * extents for averaging- i.e., we loop over extents determined by the * size of the agglomerated coarse cell. * Note that the agglomerated coarse cell is constructed correctly for * any dimensions (1, 2, or 3). *--------------------------------------------------------------------------*/ hypre_ClearIndex(index_temp); hypre_CopyIndex(index_temp, hypre_BoxIMin(&coarse_cell_box)); hypre_SetIndex3(index_temp, refine_factors[0]-1, refine_factors[1]-1, refine_factors[2]-1 ); hypre_CopyIndex(index_temp, hypre_BoxIMax(&coarse_cell_box)); volume_coarse_cell_box= hypre_BoxVolume(&coarse_cell_box); /*-------------------------------------------------------------------------- * Offsets in y & z directions for refinement patches. These will be used * for pointing to correct coarse stencil location. *--------------------------------------------------------------------------*/ OffsetA = hypre_CTAlloc(HYPRE_Int *, 2); for (i= 0; i< 2; i++) { OffsetA[i]= hypre_CTAlloc(HYPRE_Int, refine_factors[i+1]); } /*-------------------------------------------------------------------------- * Stencil contribution cnts, weights, etc are computed only if we have * a new stencil pattern. If the pattern is the same, the previously * computed stencil contribution cnts, weights, etc can be used. * * Mark the stencil_marker so that the first time the stencil is non-null, * the stencil contribution cnts, weights, etc are computed. 
*--------------------------------------------------------------------------*/ stencil_marker= trueV; for (var1= 0; var1< nvars; var1++) { cgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_crse), var1); cgrid_boxes= hypre_StructGridBoxes(cgrid); fgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_pmatrix), var1); fgrid_boxes= hypre_StructGridBoxes(fgrid); for (var2= 0; var2< nvars; var2++) { stencils= hypre_SStructPMatrixSStencil(A_crse, var1, var2); if (stencils != NULL) { stencil_size= hypre_StructStencilSize(stencils); /*----------------------------------------------------------------- * When stencil_marker== true, form the stencil contributions cnts, * weights, etc. This occurs for the first non-null stencil or * when the stencil shape of the current non-null stencil has a * different stencil shape from that of the latest non-null stencil. * * But when stencil_marker== false, we must check to see if we * need new stencil contributions cnts, weights, etc. Thus, find * the latest non-null stencil for comparison. *-----------------------------------------------------------------*/ if (stencil_marker == falseV) { /* search for the first previous non-null stencil */ found = falseV; var2_start= var2-1; for (j= var1; j>= 0; j--) { for (i= var2_start; i>= 0; i--) { stencils_last= hypre_SStructPMatrixSStencil(A_crse, j, i); if (stencils_last != NULL) { found= trueV; break; } } if (found) { break; } else { var2_start= nvars-1; } } /*-------------------------------------------------------------- * Compare the stencil shape. 
*--------------------------------------------------------------*/ stencil_last_size= hypre_StructStencilSize(stencils_last); if (stencil_last_size != stencil_size) { stencil_marker= trueV; break; } else { found= falseV; for (i= 0; i< stencil_size; i++) { hypre_CopyIndex(hypre_StructStencilElement(stencils, i), stencil_shape_i); hypre_CopyIndex(hypre_StructStencilElement(stencils_last,i), stencil_last_shape_i); hypre_SetIndex3(index_temp, stencil_shape_i[0]-stencil_last_shape_i[0], stencil_shape_i[1]-stencil_last_shape_i[1], stencil_shape_i[2]-stencil_last_shape_i[2]); AbsStencilShape(index_temp, abs_stencil_shape); if (abs_stencil_shape) { found= trueV; stencil_marker= trueV; hypre_TFree(stencil_contrib_cnt); hypre_TFree(stencil_ranks); for (i= 0; i< stencil_size; i++) { hypre_BoxDestroy(shift_box[i]); } hypre_TFree(shift_box); hypre_TFree(volume_shift_box); hypre_TFree(vals); for (j= 1; j< max_stencil_size; j++) { stencil_i= rank_stencils[j]; if (stencil_i != -1) { hypre_TFree(stencil_contrib_i[stencil_i]); hypre_TFree(weight_contrib_i[stencil_i]); } } hypre_TFree(stencil_contrib_i); hypre_TFree(weight_contrib_i); hypre_TFree(rank_stencils); } if (found) { break; } } /* for (i= 0; i< stencil_size; i++) */ } /* else */ } /* if (stencil_marker == false) */ /*----------------------------------------------------------------- * If stencil_marker==true, form the contribution structures. * Since the type of averaging is determined by the stencil shapes, * we need a ranking of the stencil shape to allow for easy * determination. * * top: 14 12 13 centre: 5 3 4 bottom 23 21 22 * 11 9 10 2 0 1 20 18 19 * 17 15 16 8 6 7 26 24 25 * * for stencil of max. size 27. * * stencil_contrib_cnt[i]= no. of fine stencils averaged to * form stencil entry i. * stencil_contrib_i[i] = rank of fine stencils contributing * to form stencil entry i. * weight_contrib_i[i] = array of weights for weighting * the contributions to stencil entry i. * stencil_ranks[i] = rank of stencil entry i. 
* rank_stencils[i] = stencil entry of rank i. *-----------------------------------------------------------------*/ if (stencil_marker == trueV) { /* mark stencil_marker for the next stencil */ stencil_marker= falseV; stencil_contrib_cnt= hypre_CTAlloc(HYPRE_Int, stencil_size); stencil_contrib_i = hypre_TAlloc(HYPRE_Int *, stencil_size); weight_contrib_i = hypre_TAlloc(HYPRE_Real *, stencil_size); stencil_ranks = hypre_TAlloc(HYPRE_Int, stencil_size); rank_stencils = hypre_TAlloc(HYPRE_Int, max_stencil_size); shift_box = hypre_TAlloc(hypre_Box *, stencil_size); volume_shift_box = hypre_TAlloc(HYPRE_Int, stencil_size); for (i= 0; i< max_stencil_size; i++) { rank_stencils[i]= -1; if (i < stencil_size) { stencil_ranks[i]= -1; } } /*----------------------------------------------------------------- * Get mappings between stencil entries and ranks and vice versa; * fine grid looping extents for averaging of the fine coefficients; * and the number of fine grid values to be averaged. * Note that the shift_boxes are constructed correctly for any * dimensions. For j>=ndim, * hypre_BoxIMin(shift_box[i])[j]=hypre_BoxIMax(shift_box[i])[j]= 0. *-----------------------------------------------------------------*/ for (i= 0; i< stencil_size; i++) { shift_box[i] = hypre_BoxCreate(ndim); hypre_CopyIndex(hypre_StructStencilElement(stencils, i), stencil_shape_i); MapStencilRank(stencil_shape_i, j); stencil_ranks[i]= j; rank_stencils[stencil_ranks[i]] = i; hypre_SetIndex3(hypre_BoxIMin(shift_box[i]), (refine_factors[0]-1)*stencil_shape_i[0], (refine_factors[1]-1)*stencil_shape_i[1], (refine_factors[2]-1)*stencil_shape_i[2]); hypre_AddIndexes(hypre_BoxIMin(shift_box[i]), hypre_BoxIMax(&coarse_cell_box), 3, hypre_BoxIMax(shift_box[i])); hypre_IntersectBoxes(&coarse_cell_box, shift_box[i], shift_box[i]); volume_shift_box[i]= hypre_BoxVolume(shift_box[i]); } /*----------------------------------------------------------------- * Derive the contribution info. 
* The above rank table is used to determine the direction indices. * Weight construction procedure valid for any dimensions. *-----------------------------------------------------------------*/ /* east */ stencil_i= rank_stencils[1]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i= 4; i<= 7; i+=3) { if (rank_stencils[i] != -1) /* ne or se */ stencil_contrib_cnt[stencil_i]++; } if (ndim > 2) { for (j= 1; j<= 2; j++) { for (i= 1; i<= 7; i+=3) { if (rank_stencils[j*9+i] != -1) /* bottom or top planes */ stencil_contrib_cnt[stencil_i]++; } } } max_contribut_size= stencil_contrib_cnt[stencil_i]; } /* fill up the east contribution stencil indices */ if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i= 4; i<= 7; i+=3) { if (rank_stencils[i] != -1) { stencil_contrib_i[stencil_i][k] = rank_stencils[i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } if (ndim > 2) { for (j= 1; j<= 2; j++) { for (i= 1; i<= 7; i+=3) { if (rank_stencils[j*9+i] != -1) { stencil_contrib_i[stencil_i][k] = rank_stencils[j*9+i]; AbsStencilShape( hypre_StructStencilElement(stencils,rank_stencils[j*9+i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* west */ stencil_i= rank_stencils[2]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i= 5; i<= 8; i+=3) { if (rank_stencils[i] != -1) /* nw or sw 
*/ stencil_contrib_cnt[stencil_i]++; } if (ndim > 2) { for (j= 1; j<= 2; j++) { for (i= 2; i<= 8; i+=3) { if (rank_stencils[j*9+i] != -1) /* bottom or top planes */ stencil_contrib_cnt[stencil_i]++; } } } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i= 5; i<= 8; i+=3) { if (rank_stencils[i] != -1) { stencil_contrib_i[stencil_i][k] = rank_stencils[i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } if (ndim > 2) { for (j= 1; j<= 2; j++) { for (i= 2; i<= 8; i+=3) { if (rank_stencils[j*9+i] != -1) { stencil_contrib_i[stencil_i][k] = rank_stencils[j*9+i]; AbsStencilShape( hypre_StructStencilElement(stencils,rank_stencils[j*9+i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* north */ stencil_i= rank_stencils[3]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i= 4; i<= 5; i++) { if (rank_stencils[i] != -1) /* ne or nw */ stencil_contrib_cnt[stencil_i]++; } if (ndim > 2) { for (j= 1; j<= 2; j++) { for (i= 3; i<= 5; i++) { if (rank_stencils[j*9+i] != -1) /* bottom or top planes */ stencil_contrib_cnt[stencil_i]++; } } } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, 
stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i= 4; i<= 5; i++) { if (rank_stencils[i] != -1) { stencil_contrib_i[stencil_i][k] = rank_stencils[i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } if (ndim > 2) { for (j= 1; j<= 2; j++) { for (i= 3; i<= 5; i++) { if (rank_stencils[j*9+i] != -1) { stencil_contrib_i[stencil_i][k] = rank_stencils[j*9+i]; AbsStencilShape( hypre_StructStencilElement(stencils,rank_stencils[j*9+i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* south */ stencil_i= rank_stencils[6]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i= 7; i<= 8; i++) { if (rank_stencils[i] != -1) /* ne or nw */ stencil_contrib_cnt[stencil_i]++; } if (ndim > 2) { for (j= 1; j<= 2; j++) { for (i= 6; i<= 8; i++) { if (rank_stencils[j*9+i] != -1) /* bottom or top planes */ stencil_contrib_cnt[stencil_i]++; } } } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i= 7; i<= 
8; i++) { if (rank_stencils[i] != -1) { stencil_contrib_i[stencil_i][k] = rank_stencils[i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } if (ndim > 2) { for (j= 1; j<= 2; j++) { for (i= 6; i<= 8; i++) { if (rank_stencils[j*9+i] != -1) { stencil_contrib_i[stencil_i][k] = rank_stencils[j*9+i]; AbsStencilShape( hypre_StructStencilElement(stencils,rank_stencils[j*9+i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /*----------------------------------------------------------------- * If only 2-d, extract the corner indices. *-----------------------------------------------------------------*/ if (ndim == 2) { /* corners: ne & nw */ for (i= 4; i<= 5; i++) { stencil_i= rank_stencils[i]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, 1); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, 1); stencil_contrib_i[stencil_i][0]= stencil_i; weight_contrib_i[stencil_i][0] = weights[0]; } } /* corners: se & sw */ for (i= 7; i<= 8; i++) { stencil_i= rank_stencils[i]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, 1); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, 1); stencil_contrib_i[stencil_i][0]= stencil_i; weight_contrib_i[stencil_i][0] = weights[0]; } } } /*----------------------------------------------------------------- * Additional directions for 3-dim case *-----------------------------------------------------------------*/ if (ndim > 2) { /* sides: top */ stencil_i= rank_stencils[9]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i=1; i<= 8; i++) { if (rank_stencils[9+i] != -1) stencil_contrib_cnt[stencil_i]++; } 
max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i=1; i<= 8; i++) { if (rank_stencils[9+i] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[9+i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[9+i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* sides: bottom */ stencil_i= rank_stencils[18]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i=1; i<= 8; i++) { if (rank_stencils[18+i] != -1) stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i=1; i<= 8; i++) { if (rank_stencils[18+i] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[18+i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[18+i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } 
/* edges: cne */ stencil_i= rank_stencils[4]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (j=1; j<= 2; j++) { if (rank_stencils[j*9+4] != -1) /* bottom or top planes */ stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (j=1; j<= 2; j++) { if (rank_stencils[j*9+4] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[j*9+4]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[j*9+4]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: cse */ stencil_i= rank_stencils[7]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (j=1; j<= 2; j++) { if (rank_stencils[j*9+7] != -1) /* bottom or top planes */ stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (j=1; j<= 2; j++) { if (rank_stencils[j*9+7] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[j*9+7]; 
AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[j*9+7]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: cnw */ stencil_i= rank_stencils[5]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (j=1; j<= 2; j++) { if (rank_stencils[j*9+5] != -1) /* bottom or top planes */ stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (j=1; j<= 2; j++) { if (rank_stencils[j*9+5] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[j*9+5]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[j*9+5]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: csw */ stencil_i= rank_stencils[8]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (j=1; j<= 2; j++) { if (rank_stencils[j*9+8] != -1) /* bottom or top planes */ stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( 
hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (j=1; j<= 2; j++) { if (rank_stencils[j*9+8] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[j*9+8]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[j*9+8]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: top east */ stencil_i= rank_stencils[10]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i=3; i<= 6; i+=3) { if (rank_stencils[10+i] != -1) stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i=3; i<= 6; i+=3) { if (rank_stencils[10+i] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[10+i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[10+i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: top west */ stencil_i= rank_stencils[11]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i=3; i<= 6; i+=3) { if (rank_stencils[11+i] != -1) stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= 
hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i=3; i<= 6; i+=3) { if (rank_stencils[11+i] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[11+i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[11+i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: top north */ stencil_i= rank_stencils[12]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i=13; i<= 14; i++) { if (rank_stencils[i] != -1) stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i=13; i<= 14; i++) { if (rank_stencils[i] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: top south*/ stencil_i= rank_stencils[15]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i=16; i<= 17; i++) { if 
(rank_stencils[i] != -1) stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i=16; i<= 17; i++) { if (rank_stencils[i] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: bottom east */ stencil_i= rank_stencils[19]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i=3; i<= 6; i+=3) { if (rank_stencils[19+i] != -1) stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i=3; i<= 6; i+=3) { if (rank_stencils[19+i] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[19+i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[19+i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for 
(i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: bottom west */ stencil_i= rank_stencils[20]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i=3; i<= 6; i+=3) { if (rank_stencils[20+i] != -1) stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i=3; i<= 6; i+=3) { if (rank_stencils[20+i] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[20+i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[20+i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: bottom north */ stencil_i= rank_stencils[21]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i=22; i<= 23; i++) { if (rank_stencils[i] != -1) stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i=22; i<= 23; i++) { if (rank_stencils[i] != -1) { stencil_contrib_i[stencil_i][k]= 
rank_stencils[i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: bottom south*/ stencil_i= rank_stencils[24]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i=25; i<= 26; i++) { if (rank_stencils[i] != -1) stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i=25; i<= 26; i++) { if (rank_stencils[i] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* corners*/ for (j= 1; j<= 2; j++) { for (i= 4; i<= 5; i++) { stencil_i= rank_stencils[9*j+i]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, 1); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, 1); stencil_contrib_i[stencil_i][0]= stencil_i; weight_contrib_i[stencil_i][0] = weights[0]; } } for (i= 7; i<= 8; i++) { stencil_i= rank_stencils[9*j+i]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, 1); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, 1); 
stencil_contrib_i[stencil_i][0]= stencil_i; weight_contrib_i[stencil_i][0] = weights[0]; } } } } /* if ndim > 2 */ /*----------------------------------------------------------------- * Allocate for the temporary vector used in computing the * averages. *-----------------------------------------------------------------*/ vals= hypre_CTAlloc(HYPRE_Real, max_contribut_size); /*----------------------------------------------------------------- * coarse grid stencil contributor structures have been formed. *-----------------------------------------------------------------*/ } /* if (stencil_marker == true) */ /*--------------------------------------------------------------------- * Loop over gridboxes to average stencils *---------------------------------------------------------------------*/ smatrix_var = hypre_SStructPMatrixSMatrix(A_pmatrix, var1, var2); crse_smatrix= hypre_SStructPMatrixSMatrix(A_crse, var1, var2); /*--------------------------------------------------------------------- * data ptrs to extract and fill in data. *---------------------------------------------------------------------*/ a_ptrs = hypre_TAlloc(HYPRE_Real *, stencil_size); crse_ptrs= hypre_TAlloc(HYPRE_Real *, stencil_size); hypre_ForBoxI(ci, cgrid_boxes) { cgrid_box= hypre_BoxArrayBox(cgrid_boxes, ci); fbox_interior_ci = fbox_interior[var1][ci]; fbox_bdy_ci = fbox_bdy[var1][ci]; interior_fboxi_ci= interior_fboxi[var1][ci]; bdy_fboxi_ci = bdy_fboxi[var1][ci]; crse_dbox= hypre_BoxArrayBox(hypre_StructMatrixDataSpace(crse_smatrix), ci); /*------------------------------------------------------------------ * grab the correct coarse grid pointers. These are the parent base * grids. 
*------------------------------------------------------------------*/ for (i= 0; i< stencil_size; i++) { hypre_CopyIndex(hypre_StructStencilElement(stencils, i), stencil_shape_i); crse_ptrs[i]= hypre_StructMatrixExtractPointerByIndex(crse_smatrix, ci, stencil_shape_i); } /*------------------------------------------------------------------ * Loop over the interior of each patch inside cgrid_box. *------------------------------------------------------------------*/ hypre_ForBoxI(fi, fbox_interior_ci) { fgrid_box= hypre_BoxArrayBox(fbox_interior_ci, fi); /*-------------------------------------------------------------- * grab the fine grid ptrs & create the offsets for the fine * grid ptrs. *--------------------------------------------------------------*/ A_dbox= hypre_BoxArrayBox(hypre_StructMatrixDataSpace(smatrix_var), interior_fboxi_ci[fi]); for (i= 0; i< stencil_size; i++) { hypre_CopyIndex(hypre_StructStencilElement(stencils, i), stencil_shape_i); a_ptrs[i]= hypre_StructMatrixExtractPointerByIndex(smatrix_var, interior_fboxi_ci[fi], stencil_shape_i); } /*--------------------------------------------------------------- * Compute the offsets for pointing to the correct data. * Note that for 1-d, OffsetA[j][i]= 0. Therefore, this ptr * will be correct for 1-d. 
*---------------------------------------------------------------*/ for (j= 0; j< 2; j++) { OffsetA[j][0]= 0; for (i= 1; i< refine_factors[j+1]; i++) { if (j == 0) { hypre_SetIndex3(index_temp, 0, i, 0); } else { hypre_SetIndex3(index_temp, 0, 0, i); } OffsetA[j][i] = hypre_BoxOffsetDistance(A_dbox, index_temp); } } hypre_CopyIndex(hypre_BoxIMin(fgrid_box), fstart); hypre_CopyIndex(hypre_BoxIMax(fgrid_box), fend); /* coarsen the interior patch box*/ hypre_ClearIndex(index_temp); hypre_StructMapFineToCoarse(fstart, index_temp, stridef, hypre_BoxIMin(&fine_box)); hypre_StructMapFineToCoarse(fend, index_temp, stridef, hypre_BoxIMax(&fine_box)); hypre_CopyIndex(hypre_BoxIMin(&fine_box), cstart); /*---------------------------------------------------------------- * Loop over interior grid box. *----------------------------------------------------------------*/ hypre_BoxGetSize(&fine_box, loop_size); hypre_BoxLoop2Begin(ndim, loop_size, A_dbox, fstart, stridef, iA, crse_dbox, cstart, stridec, iAc); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iA,iAc,i,rank,index1,index2,m,l,k,j,iA_shift_z,iA_shift_zy,iA_shift_zyx,stencil_i,sum,vals) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop2For(iA, iAc) { for (i= 0; i< stencil_size; i++) { rank= stencil_ranks[i]; /*------------------------------------------------------------ * Loop over refinement agglomeration making up a coarse cell * when a non-centre stencil. *------------------------------------------------------------*/ if (rank) { /*-------------------------------------------------------- * Loop over refinement agglomeration extents making up a * a coarse cell. 
*--------------------------------------------------------*/ hypre_CopyIndex(hypre_BoxIMin(shift_box[i]), index1); hypre_CopyIndex(hypre_BoxIMax(shift_box[i]), index2); for (m= 0; m< stencil_contrib_cnt[i]; m++) { vals[m]= 0.0; } /*-------------------------------------------------------- * For 1-d, index1[l]= index2[l]= 0, l>=1. So * iA_shift_zyx= j, * which is correct. Similarly, 2-d is correct. *--------------------------------------------------------*/ for (l= index1[2]; l<= index2[2]; l++) { iA_shift_z= iA + OffsetA[1][l]; for (k= index1[1]; k<= index2[1]; k++) { iA_shift_zy= iA_shift_z + OffsetA[0][k]; for (j= index1[0]; j<= index2[0]; j++) { iA_shift_zyx= iA_shift_zy + j; for (m= 0; m< stencil_contrib_cnt[i]; m++) { stencil_i= stencil_contrib_i[i][m]; vals[m]+= a_ptrs[stencil_i][iA_shift_zyx]; } } } } /*---------------------------------------------------------- * average & weight the contributions and place into coarse * stencil entry. *----------------------------------------------------------*/ crse_ptrs[i][iAc]= 0.0; for (m= 0; m< stencil_contrib_cnt[i]; m++) { crse_ptrs[i][iAc]+= vals[m]*weight_contrib_i[i][m]; } crse_ptrs[i][iAc]/= volume_shift_box[i]; } /* if (rank) */ } /* for i */ /*------------------------------------------------------------------ * centre stencil: * The centre stencil is computed so that the row sum is equal to * the sum of the row sums of the fine matrix. Uses the computed * coarse off-diagonal stencils. * * No fine-coarse interface for the interior boxes. 
*------------------------------------------------------------------*/ hypre_CopyIndex(hypre_BoxIMin(&coarse_cell_box), index1); hypre_CopyIndex(hypre_BoxIMax(&coarse_cell_box), index2); sum= 0.0; for (l= index1[2]; l<= index2[2]; l++) { iA_shift_z= iA + OffsetA[1][l]; for (k= index1[1]; k<= index2[1]; k++) { iA_shift_zy= iA_shift_z + OffsetA[0][k]; for (j= index1[0]; j<= index2[0]; j++) { iA_shift_zyx= iA_shift_zy + j; for (m= 0; m< stencil_size; m++) { sum+= a_ptrs[m][iA_shift_zyx]; } } } } /*--------------------------------------------------------------- * coarse centre coefficient- when away from the fine-coarse * interface, the centre coefficient is the sum of the * off-diagonal components. *---------------------------------------------------------------*/ sum /= scaling; for (m= 0; m< stencil_size; m++) { rank= stencil_ranks[m]; if (rank) { sum-= crse_ptrs[m][iAc]; } } crse_ptrs[ rank_stencils[0] ][iAc]= sum; } hypre_BoxLoop2End(iA, iAc); } /* end hypre_ForBoxI(fi, fbox_interior_ci) */ /*------------------------------------------------------------------ * Loop over the boundaries of each patch inside cgrid_box. *------------------------------------------------------------------*/ hypre_ForBoxArrayI(arrayi, fbox_bdy_ci) { fbox_bdy_ci_fi= hypre_BoxArrayArrayBoxArray(fbox_bdy_ci, arrayi); hypre_ForBoxI(fi, fbox_bdy_ci_fi) { fgrid_box= hypre_BoxArrayBox(fbox_bdy_ci_fi, fi); /*----------------------------------------------------------- * grab the fine grid ptrs & create the offsets for the fine * grid ptrs. 
*-----------------------------------------------------------*/ A_dbox= hypre_BoxArrayBox(hypre_StructMatrixDataSpace(smatrix_var), bdy_fboxi_ci[arrayi]); for (i= 0; i< stencil_size; i++) { hypre_CopyIndex(hypre_StructStencilElement(stencils, i), stencil_shape_i); a_ptrs[i]= hypre_StructMatrixExtractPointerByIndex(smatrix_var, bdy_fboxi_ci[arrayi], stencil_shape_i); } /*-------------------------------------------------------------- * Compute the offsets for pointing to the correct data. *--------------------------------------------------------------*/ for (j= 0; j< 2; j++) { OffsetA[j][0]= 0; for (i= 1; i< refine_factors[j+1]; i++) { if (j == 0) { hypre_SetIndex3(index_temp, 0, i, 0); } else { hypre_SetIndex3(index_temp, 0, 0, i); } OffsetA[j][i] = hypre_BoxOffsetDistance(A_dbox, index_temp); } } hypre_CopyIndex(hypre_BoxIMin(fgrid_box), fstart); hypre_CopyIndex(hypre_BoxIMax(fgrid_box), fend); /* coarsen the patch box*/ hypre_ClearIndex(index_temp); hypre_StructMapFineToCoarse(fstart, index_temp, stridef, hypre_BoxIMin(&fine_box)); hypre_StructMapFineToCoarse(fend, index_temp, stridef, hypre_BoxIMax(&fine_box)); hypre_CopyIndex(hypre_BoxIMin(&fine_box), cstart); /*-------------------------------------------------------------- * Loop over boundary grid box. 
*--------------------------------------------------------------*/ hypre_BoxGetSize(&fine_box, loop_size); hypre_BoxLoop2Begin(ndim, loop_size, A_dbox, fstart, stridef, iA, crse_dbox, cstart, stridec, iAc); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iA,iAc,i,rank,index1,index2,m,l,k,j,iA_shift_z,iA_shift_zy,iA_shift_zyx,stencil_i,temp3,ll,kk,jj,temp2,cnt1,index_temp,boxman_entry,found,Uventry,nUentries,ncols,rows,cols,vals2,sum,vals) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop2For(iA, iAc) { hypre_BoxLoopGetIndex(lindex); for (i= 0; i< stencil_size; i++) { rank= stencil_ranks[i]; /*-------------------------------------------------------- * Loop over refinement agglomeration making up a coarse * cell when a non-centre stencil. *--------------------------------------------------------*/ if (rank) { /*----------------------------------------------------- * Loop over refinement agglomeration extents making up * a coarse cell. *-----------------------------------------------------*/ hypre_CopyIndex(hypre_BoxIMin(shift_box[i]), index1); hypre_CopyIndex(hypre_BoxIMax(shift_box[i]), index2); for (m= 0; m< stencil_contrib_cnt[i]; m++) { vals[m]= 0.0; } for (l= index1[2]; l<= index2[2]; l++) { iA_shift_z= iA + OffsetA[1][l]; for (k= index1[1]; k<= index2[1]; k++) { iA_shift_zy= iA_shift_z + OffsetA[0][k]; for (j= index1[0]; j<= index2[0]; j++) { iA_shift_zyx= iA_shift_zy + j; for (m= 0; m< stencil_contrib_cnt[i]; m++) { stencil_i= stencil_contrib_i[i][m]; vals[m]+= a_ptrs[stencil_i][iA_shift_zyx]; } } } } /*--------------------------------------------------------- * average & weight the contributions and place into coarse * stencil entry. 
*---------------------------------------------------------*/ crse_ptrs[i][iAc]= 0.0; for (m= 0; m< stencil_contrib_cnt[i]; m++) { crse_ptrs[i][iAc]+= vals[m]*weight_contrib_i[i][m]; } crse_ptrs[i][iAc]/= volume_shift_box[i]; } /* if (rank) */ } /* for i */ /*--------------------------------------------------------------- * centre stencil: * The centre stencil is computed so that the row sum is equal to * th sum of the row sums of the fine matrix. Uses the computed * coarse off-diagonal stencils. * * Along the fine-coarse interface, we need to add the unstructured * connections. *---------------------------------------------------------------*/ hypre_CopyIndex(hypre_BoxIMin(&coarse_cell_box), index1); hypre_CopyIndex(hypre_BoxIMax(&coarse_cell_box), index2); temp3= hypre_CTAlloc(HYPRE_Real, volume_coarse_cell_box); /*--------------------------------------------------------------- * iA_shift_zyx is computed correctly for 1 & 2-d. Also, * ll= 0 for 2-d, and ll= kk= 0 for 1-d. Correct ptrs. *---------------------------------------------------------------*/ for (l= index1[2]; l<= index2[2]; l++) { iA_shift_z= iA + OffsetA[1][l]; ll = l*refine_factors[1]*refine_factors[0]; for (k= index1[1]; k<= index2[1]; k++) { iA_shift_zy= iA_shift_z + OffsetA[0][k]; kk = ll + k*refine_factors[0]; for (j= index1[0]; j<= index2[0]; j++) { iA_shift_zyx= iA_shift_zy + j; jj = kk + j; for (m= 0; m< stencil_size; m++) { temp3[jj]+= a_ptrs[m][iA_shift_zyx]; } } } } /*------------------------------------------------------------ * extract all unstructured connections. Note that we extract * from sstruct_matrix A, which already has been assembled. 
*------------------------------------------------------------*/ if (nUventries > 0) { temp2= hypre_CTAlloc(HYPRE_Int, volume_coarse_cell_box); cnt1= 0; for (l= index1[2]; l<= index2[2]; l++) { ll= l*refine_factors[1]*refine_factors[0]; for (k= index1[1]; k<= index2[1]; k++) { kk= ll + k*refine_factors[0]; for (j= index1[0]; j<= index2[0]; j++) { jj= kk+ j; hypre_SetIndex3(index_temp, j+lindex[0]*stridef[0], k+lindex[1]*stridef[1], l+lindex[2]*stridef[2]); hypre_AddIndexes(fstart, index_temp, 3, index_temp); hypre_SStructGridFindBoxManEntry(grid, part_fine, index_temp, var1, &boxman_entry); hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, index_temp, &rank, matrix_type); found= falseV; i= hypre_SStructGraphIUVEntry(graph, 0); m= hypre_SStructGraphIUVEntry(graph, nUventries-1); if ((rank-startrank) >= i && (rank-startrank) <= m) { found= trueV; } if (found) { Uventry= hypre_SStructGraphUVEntry(graph, rank-startrank); if (Uventry != NULL) { nUentries= hypre_SStructUVEntryNUEntries(Uventry); m= 0; for (i= 0; i< nUentries; i++) { if (hypre_SStructUVEntryToPart(Uventry, i)==part_crse) { m++; } } /* for (i= 0; i< nUentries; i++) */ temp2[jj]= m; cnt1 += m; } /* if (Uventry != NULL) */ } /* if (found) */ } /* for (j= index1[0]; j<= index2[0]; j++) */ } /* for (k= index1[1]; k<= index2[1]; k++) */ } /* for (l= index1[2]; l<= index2[2]; l++) */ ncols= hypre_TAlloc(HYPRE_Int, cnt1); for (l= 0; l< cnt1; l++) { ncols[l]= 1; } rows = hypre_TAlloc(HYPRE_Int, cnt1); cols = hypre_TAlloc(HYPRE_Int, cnt1); vals2= hypre_CTAlloc(HYPRE_Real, cnt1); cnt1= 0; for (l= index1[2]; l<= index2[2]; l++) { ll= l*refine_factors[1]*refine_factors[0]; for (k= index1[1]; k<= index2[1]; k++) { kk= ll + k*refine_factors[0]; for (j= index1[0]; j<= index2[0]; j++) { jj= kk+ j; hypre_SetIndex3(index_temp, j+lindex[0]*stridef[0], k+lindex[1]*stridef[1], l+lindex[2]*stridef[2]); hypre_AddIndexes(fstart, index_temp, 3, index_temp); hypre_SStructGridFindBoxManEntry(grid, part_fine, index_temp, var1, 
&boxman_entry); hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, index_temp, &rank, matrix_type); found= falseV; if (nUventries > 0) { i= hypre_SStructGraphIUVEntry(graph, 0); m= hypre_SStructGraphIUVEntry(graph, nUventries-1); if ((rank-startrank) >= i && (rank-startrank) <= m) { found= trueV; } } if (found) { Uventry= hypre_SStructGraphUVEntry(graph, rank-startrank); if (Uventry != NULL) { nUentries= hypre_SStructUVEntryNUEntries(Uventry); for (i= 0; i< nUentries; i++) { if (hypre_SStructUVEntryToPart(Uventry, i)==part_crse) { rows[cnt1]= rank; cols[cnt1++]= hypre_SStructUVEntryToRank(Uventry, i); } } /* for (i= 0; i< nUentries; i++) */ } /* if (Uventry != NULL) */ } /* if (found) */ } /* for (j= index1[0]; j<= index2[0]; j++) */ } /* for (k= index1[1]; k<= index2[1]; k++) */ } /* for (l= index1[2]; l<= index2[2]; l++) */ HYPRE_IJMatrixGetValues(ij_A, cnt1, ncols, rows, cols, vals2); cnt1= 0; for (l= index1[2]; l<= index2[2]; l++) { ll= l*refine_factors[1]*refine_factors[0]; for (k= index1[1]; k<= index2[1]; k++) { kk= ll + k*refine_factors[0]; for (j= index1[0]; j<= index2[0]; j++) { jj= kk+ j; for (m= 0; m< temp2[jj]; m++) { temp3[jj]+= vals2[cnt1]; cnt1++; } temp2[jj]= 0; /* zero off for next time */ } /* for (j= index1[0]; j<= index2[0]; j++) */ } /* for (k= index1[1]; k<= index2[1]; k++) */ } /* for (l= index1[2]; l<= index2[2]; l++) */ hypre_TFree(ncols); hypre_TFree(rows); hypre_TFree(cols); hypre_TFree(vals2); hypre_TFree(temp2); } /* if Uventries > 0 */ sum= 0.0; for (l= index1[2]; l<= index2[2]; l++) { ll= l*refine_factors[1]*refine_factors[0]; for (k= index1[1]; k<= index2[1]; k++) { kk= ll + k*refine_factors[0]; for (j= index1[0]; j<= index2[0]; j++) { jj= kk + j; sum+= temp3[jj]; } } } sum /= scaling; crse_ptrs[ rank_stencils[0] ][iAc]= sum; hypre_TFree(temp3); } hypre_BoxLoop2End(iA, iAc); } /* hypre_ForBoxI(fi, fbox_bdy_ci_fi) */ } /* hypre_ForBoxArrayI(arrayi, fbox_bdy_ci) */ } /* hypre_ForBoxI(ci, cgrid_boxes) */ hypre_TFree(a_ptrs); 
hypre_TFree(crse_ptrs); } /* if (stencils != NULL) */ } /* end var2 */ } /* end var1 */ if (stencil_contrib_cnt) hypre_TFree(stencil_contrib_cnt); if (stencil_ranks) hypre_TFree(stencil_ranks); if (volume_shift_box) hypre_TFree(volume_shift_box); if (vals) hypre_TFree(vals); if (shift_box) { for (j= 0; j< stencil_size; j++) { if (shift_box[j]) hypre_BoxDestroy(shift_box[j]); } hypre_TFree(shift_box); } if (stencil_contrib_i) { for (j= 1; j< max_stencil_size; j++) { stencil_i= rank_stencils[j]; if (stencil_i != -1) { if (stencil_contrib_i[stencil_i]) hypre_TFree(stencil_contrib_i[stencil_i]); } } hypre_TFree(stencil_contrib_i); } if (weight_contrib_i) { for (j= 1; j< max_stencil_size; j++) { stencil_i= rank_stencils[j]; if (stencil_i != -1) { if (weight_contrib_i[stencil_i]) hypre_TFree(weight_contrib_i[stencil_i]); } } hypre_TFree(weight_contrib_i); } if (rank_stencils) hypre_TFree(rank_stencils); if (OffsetA) { for (j= 0; j< 2; j++) { if (OffsetA[j]) hypre_TFree(OffsetA[j]); } hypre_TFree(OffsetA); } /*-------------------------------------------------------------------------- * STEP 2: * * Interface coarsening: fine-to-coarse connections. We are * assuming that only like-variables couple along interfaces. * * The task is to coarsen all the fine-to-coarse unstructured * connections and to compute coarse coefficients along the * interfaces (coarse-to-fine coefficients are obtained from these * computed values assuming symmetry). This involves * 1) scanning over the graph entries to find the locations of * the unstructure connections; * 2) determining the stencil shape of the coarsened connections; * 3) averaging the unstructured coefficients to compute * coefficient entries for the interface stencils; * 4) determining the weights of the interface stencil coefficients * to construct the structured coarse grid matrix along the * interfaces. 
* * We perform this task by * 1) scanning over the graph entries to group the locations * of the fine-to-coarse connections wrt the boxes of the * fine grid. Temporary vectors storing the Uventries indices * and the number of connections for each box will be created; * 2) for each fine grid box, group the fine-to-coarse connections * with respect to the connected coarse nodes. Temporary vectors * storing the Uventry indices and the Uentry indices for each * coarse node will be created (i.e., for a fixed coarse grid node, * record the fine node Uventries indices that connect to this * coarse node and Uentry index of the Uventry that contains * this coarse node.). The grouping is accomplished comparing the * ranks of the coarse nodes; * 3) using the Uventries and Uentry indices for each coarse node, * "coarsen" the fine grid connections to this coarse node to * create interface stencils (wrt to the coarse nodes- i.e., * the centre of the stencil is at a coarse node). Also, find * the IJ rows and columns corresponding to all the fine-to-coarse * connections in a box, and extract the unstructured coefficients; * 4) looping over all coarse grid nodes connected to a fixed fine box, * compute the arithmetically averaged interface stencils; * 5) compare the underlying coarse grid structured stencil shape * to the interface stencil shape to determine how to weight the * averaged interface stencil coefficients. * * EXCEPTION: A NODE CAN CONTAIN ONLY UNSTRUCTURED CONNECTIONS * BETWEEN ONLY TWO AMR LEVELS- I.E., WE CANNOT HAVE A NODE THAT * IS ON THE INTERFACE OF MORE THAN TWO AMR LEVELS. CHANGES TO * HANDLE THIS LATTER CASE WILL INVOLVE THE SEARCH FOR f/c * CONNECTIONS. 
*-----------------------------------------------------------------*/ if (nUventries > 0) { nvars = hypre_SStructPMatrixNVars(A_pmatrix); for (var1= 0; var1< nvars; var1++) { /*----------------------------------------------------------------- * Yank out the structured stencils for this variable (only like * variables considered) and find their ranks. *-----------------------------------------------------------------*/ stencils = hypre_SStructPMatrixSStencil(A_crse, var1, var1); stencil_size= hypre_StructStencilSize(stencils); stencil_ranks= hypre_TAlloc(HYPRE_Int, stencil_size); rank_stencils= hypre_TAlloc(HYPRE_Int, max_stencil_size); for (i= 0; i< stencil_size; i++) { hypre_CopyIndex(hypre_StructStencilElement(stencils, i), stencil_shape_i); MapStencilRank( stencil_shape_i, stencil_ranks[i] ); rank_stencils[ stencil_ranks[i] ] = i; } /*----------------------------------------------------------------- * qsort the ranks into ascending order *-----------------------------------------------------------------*/ hypre_qsort0(stencil_ranks, 0, stencil_size-1); crse_smatrix= hypre_SStructPMatrixSMatrix(A_crse, var1, var1); cgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_crse), var1); cgrid_boxes= hypre_StructGridBoxes(cgrid); fgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_pmatrix), var1); fgrid_boxes= hypre_StructGridBoxes(fgrid); box_starts= hypre_CTAlloc(HYPRE_Int, hypre_BoxArraySize(fgrid_boxes)); box_ends = hypre_CTAlloc(HYPRE_Int, hypre_BoxArraySize(fgrid_boxes)); hypre_SStructGraphFindSGridEndpts(graph, part_fine, var1, myid, 0, box_starts); hypre_SStructGraphFindSGridEndpts(graph, part_fine, var1, myid, 1, box_ends); /*----------------------------------------------------------------- * Step 1: scanning over the graph entries to group the locations * of the unstructured connections wrt to fine grid boxes. * * Count the components that couple for each box. * * box_graph_indices[fi]= array of Uventries indices in box fi. 
* box_graph_cnts[fi] = number of Uventries in box fi. * cdata_space_rank[ci] = begin offset rank of coarse data_space * box ci. *-----------------------------------------------------------------*/ box_array_size = hypre_BoxArraySize(fgrid_boxes); cbox_array_size = hypre_BoxArraySize(cgrid_boxes); box_graph_indices= hypre_CTAlloc(HYPRE_Int *, box_array_size); box_graph_cnts = hypre_CTAlloc(HYPRE_Int , box_array_size); data_space = hypre_StructMatrixDataSpace(crse_smatrix); cdata_space_ranks= hypre_CTAlloc(HYPRE_Int, cbox_array_size); cdata_space_ranks[0]= 0; for (i= 1; i< cbox_array_size; i++) { cdata_space_ranks[i]= cdata_space_ranks[i-1]+ hypre_BoxVolume(hypre_BoxArrayBox(data_space, i-1)); } /*----------------------------------------------------------------- * Scanning obtained by searching iUventries between the start * and end of a fine box. Binary search used to find the interval * between these two endpts. Index (-1) returned if no interval * bounds found. Note that if start has positive index, then end * must have a positive index also. *-----------------------------------------------------------------*/ for (fi= 0; fi< box_array_size; fi++) { i= hypre_LowerBinarySearch(iUventries, box_starts[fi], nUventries); if (i >= 0) { j= hypre_UpperBinarySearch(iUventries, box_ends[fi], nUventries); box_graph_indices[fi]= hypre_TAlloc(HYPRE_Int, j-i+1); for (k= 0; k< (j-i+1); k++) { Uventry= hypre_SStructGraphUVEntry(graph, iUventries[i+k]); for (m= 0; m< hypre_SStructUVEntryNUEntries(Uventry); m++) { if (hypre_SStructUVEntryToPart(Uventry, m) == part_crse) { box_graph_indices[fi][box_graph_cnts[fi]]= iUventries[i+k]; box_graph_cnts[fi]++; break; } } /* for (m= 0; m< hypre_SStructUVEntryNUEntries(Uventry); m++) */ } /* for (k= 0; k< (j-i+1); k++) */ } /* if (i >= 0) */ } /* for (fi= 0; fi< box_array_size; fi++) */ /*----------------------------------------------------------------- * Step 2: * Determine and group the fine-to-coarse connections in a box. 
* Grouped according to the coarsened fine grid interface nodes. * * box_ranks = ranks of coarsened fine grid interface * nodes. * box_connections = counter for the distinct coarsened fine * grid interface nodes. This can be * used to group all the Uventries of a * coarsened fine grid node. * cindex[l] = the hypre_Index of coarsen node l. * parents_cnodes[l] = parent box that contains the coarsened * fine grid interface node l. * fine_interface_ranks[l]= rank of coarsened fine grid interface * node l. * box_ranks_cnt[l] = counter for no. of Uventries for * coarsened node l. * coarse_contrib_Uv[l] = Uventry indices for Uventries that * contain fine-to-coarse connections of * coarse node l. *-----------------------------------------------------------------*/ for (fi= 0; fi< box_array_size; fi++) { /*------------------------------------------------------------- * Determine the coarse data ptrs corresponding to fine box fi. * These are needed in assigning the averaged unstructured * coefficients. * * Determine how many distinct coarse grid nodes are in the * unstructured connection for a given box. Each node has a * structures. * * temp1 & temp2 are linked lists vectors used for grouping the * Uventries for a given coarse node. *-------------------------------------------------------------*/ box_ranks = hypre_TAlloc(HYPRE_Int, box_graph_cnts[fi]); box_connections = hypre_TAlloc(HYPRE_Int, box_graph_cnts[fi]); parents = hypre_TAlloc(HYPRE_Int, box_graph_cnts[fi]); temp1 = hypre_CTAlloc(HYPRE_Int, box_graph_cnts[fi]+1); temp2 = hypre_CTAlloc(HYPRE_Int, box_graph_cnts[fi]); Uv_cindex = hypre_TAlloc(hypre_Index, box_graph_cnts[fi]); /*------------------------------------------------------------- * determine the parent box of this fgrid_box. 
*-------------------------------------------------------------*/ hypre_ClearIndex(index_temp); for (i= 0; i < box_graph_cnts[fi]; i++) { Uventry = Uventries[box_graph_indices[fi][i]]; /*------------------------------------------------------------- * Coarsen the fine grid interface nodes and then get their * ranks. The correct coarse grid is needed to determine the * correct data_box. * Save the rank of the coarsened index & the parent box id. *-------------------------------------------------------------*/ hypre_CopyIndex(hypre_SStructUVEntryIndex(Uventry), index); hypre_StructMapFineToCoarse(index, index_temp, stridef, Uv_cindex[i]); hypre_BoxSetExtents(&fine_box, Uv_cindex[i], Uv_cindex[i]); for (j= 0; j< cboxi_fcnt[var1][fi]; j++) { ci= cboxi_fboxes[var1][fi][j]; cgrid_box= hypre_BoxArrayBox(cgrid_boxes, ci); hypre_IntersectBoxes(&fine_box, cgrid_box, &intersect_box); if (hypre_BoxVolume(&intersect_box) > 0) { break; } } parents[i] = ci; box_ranks[i]= cdata_space_ranks[ci] + hypre_BoxIndexRank(hypre_BoxArrayBox(data_space, ci), Uv_cindex[i]); } /*--------------------------------------------------------------- * Determine and "group" the Uventries using the box_ranks. * temp2 stores the Uventries indices for a coarsen node. *---------------------------------------------------------------*/ cnt1= 0; j = 0; temp1[cnt1]= j; for (i= 0; i< box_graph_cnts[fi]; i++) { if (box_ranks[i] != -1) { k = box_ranks[i]; box_connections[i]= cnt1; temp2[j++] = box_graph_indices[fi][i]; for (l= i+1; l< box_graph_cnts[fi]; l++) { if (box_ranks[l] == k) { box_connections[l]= cnt1; temp2[j++] = box_graph_indices[fi][l]; box_ranks[l] =-1; } } cnt1++; temp1[cnt1]= j; } } /*----------------------------------------------------------------- * Store the graph entry info and other index info for each coarse * grid node. 
*-----------------------------------------------------------------*/ parents_cnodes = hypre_TAlloc(HYPRE_Int, cnt1); fine_interface_ranks= hypre_TAlloc(HYPRE_Int, cnt1); box_ranks_cnt = hypre_CTAlloc(HYPRE_Int, cnt1); coarse_contrib_Uv = hypre_TAlloc(HYPRE_Int *, cnt1); cindex = hypre_TAlloc(hypre_Index, cnt1); for (i= 0; i< box_graph_cnts[fi]; i++) { if (box_ranks[i] != -1) { j = box_connections[i]; parents_cnodes[j] = parents[i]; fine_interface_ranks[j]= hypre_BoxIndexRank(hypre_BoxArrayBox(data_space, parents[i]), Uv_cindex[i]); hypre_CopyIndex(Uv_cindex[i], cindex[j]); box_ranks_cnt[j] = temp1[j+1] - temp1[j]; coarse_contrib_Uv[j] = hypre_TAlloc(HYPRE_Int, box_ranks_cnt[j]); l = temp1[j]; for (k= 0; k< box_ranks_cnt[j]; k++) { coarse_contrib_Uv[j][k]= temp2[l+k]; } } } if (box_ranks) hypre_TFree(box_ranks); if (box_connections) hypre_TFree(box_connections); if (parents) hypre_TFree(parents); if (temp1) hypre_TFree(temp1); if (temp2) hypre_TFree(temp2); if (Uv_cindex) hypre_TFree(Uv_cindex); /*------------------------------------------------------------------------ * Step 3: * Create the interface stencils. * * interface_max_stencil_ranks[i] = stencil_shape rank for each coarse * Uentry connection of coarsened node * i (i.e., the stencil_shape ranks of * the interface stencils at node i). * interface_max_stencil_cnt[i][m]= counter for number of Uentries * that describes a connection which * coarsens into stencil_shape rank m. * coarse_stencil_cnts[i] = counter for the no. of distinct * interface stencil_shapes (i.e., the * no. entries of the interface stencil). * interface_stencil_ranks[i][l] = stencil_shape rank for interface * stencil entry l, for coarse node i. * interface_rank_stencils[i][j] = interface stencil entry for * stencil_shape rank j, for node i. 
*------------------------------------------------------------------------*/ /*----------------------------------------------------------------- * Extract rows & cols info for extracting data from IJ matrix. * Extract for all connections for a box. *-----------------------------------------------------------------*/ hypre_ClearIndex(index_temp); nrows= 0; box_to_ranks_cnt= hypre_CTAlloc(HYPRE_Int, cnt1); for (i= 0; i< cnt1; i++) { for (j= 0; j< box_ranks_cnt[i]; j++) { Uventry = Uventries[ coarse_contrib_Uv[i][j] ]; for (k= 0; k< hypre_SStructUVEntryNUEntries(Uventry); k++) { if (hypre_SStructUVEntryToPart(Uventry, k) == part_crse) { box_to_ranks_cnt[i]++; } } } nrows+= box_to_ranks_cnt[i]; } ncols= hypre_TAlloc(HYPRE_Int, nrows); for (i= 0; i< nrows; i++) { ncols[i]= 1; } rows= hypre_TAlloc(HYPRE_Int, nrows); cols= hypre_TAlloc(HYPRE_Int, nrows); vals= hypre_CTAlloc(HYPRE_Real, nrows); interface_max_stencil_ranks= hypre_TAlloc(HYPRE_Int *, cnt1); interface_max_stencil_cnt = hypre_TAlloc(HYPRE_Int *, cnt1); interface_rank_stencils = hypre_TAlloc(HYPRE_Int *, cnt1); interface_stencil_ranks = hypre_TAlloc(HYPRE_Int *, cnt1); coarse_stencil_cnt = hypre_CTAlloc(HYPRE_Int , cnt1); k= 0; for (i= 0; i< cnt1; i++) { /*----------------------------------------------------------------- * for each coarse interface node, we get a stencil. We compute only * the ranks assuming a maximum size stencil of 27. *-----------------------------------------------------------------*/ interface_max_stencil_ranks[i]= hypre_TAlloc(HYPRE_Int, box_to_ranks_cnt[i]); interface_max_stencil_cnt[i] = hypre_CTAlloc(HYPRE_Int, max_stencil_size); /*----------------------------------------------------------------- * conjugate the coarse node index for determining the stencil * shapes for the Uentry connections. 
*-----------------------------------------------------------------*/ hypre_CopyIndex(cindex[i], index1); hypre_SetIndex3(index1, -index1[0], -index1[1], -index1[2]); n= 0; for (j= 0; j< box_ranks_cnt[i]; j++) { /*-------------------------------------------------------------- * extract the row rank for a given Uventry. Note that these * are the ranks in the grid of A. Therefore, we grab the index * from the nested_graph Uventry to determine the global rank. * With the rank, find the corresponding Uventry of the graph * of A. The to_ranks now can be extracted out. *--------------------------------------------------------------*/ Uventry = Uventries[ coarse_contrib_Uv[i][j] ]; hypre_CopyIndex(hypre_SStructUVEntryIndex(Uventry), index); hypre_SStructGridFindBoxManEntry(grid, part_fine, index, var1, &boxman_entry); hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, index, &rank, matrix_type); Uventry= hypre_SStructGraphUVEntry(graph, rank-startrank); nUentries= hypre_SStructUVEntryNUEntries(Uventry); for (l= 0; l< nUentries; l++) { if (hypre_SStructUVEntryToPart(Uventry, l) == part_crse) { to_rank = hypre_SStructUVEntryToRank(Uventry, l); rows[k] = rank; cols[k++]= to_rank; /*--------------------------------------------------------- * compute stencil shape for this Uentry. *---------------------------------------------------------*/ hypre_CopyIndex( hypre_SStructUVEntryToIndex(Uventry,l), index ); hypre_AddIndexes(index, index1, 3, index2); MapStencilRank(index2, m); interface_max_stencil_ranks[i][n++]= m; interface_max_stencil_cnt[i][m]++; } } } hypre_TFree(coarse_contrib_Uv[i]); /*----------------------------------------------------------------- * Determine only the distinct stencil ranks for coarse node i. 
*-----------------------------------------------------------------*/ l= 0; for (j= 0; j< max_stencil_size; j++) { if (interface_max_stencil_cnt[i][j]) { l++; } } coarse_stencil_cnt[i]= l; interface_stencil_ranks[i]= hypre_TAlloc(HYPRE_Int, l); interface_rank_stencils[i]= hypre_TAlloc(HYPRE_Int, max_stencil_size); /*----------------------------------------------------------------- * For each stencil rank, assign one of the stencil_shape_i index. *-----------------------------------------------------------------*/ l= 0; for (j= 0; j< max_stencil_size; j++) { if (interface_max_stencil_cnt[i][j]) { interface_rank_stencils[i][j]= l; interface_stencil_ranks[i][l]= j; l++; } } } /* for (i= 0; i< cnt1; i++) */ hypre_TFree(coarse_contrib_Uv); hypre_TFree(box_ranks_cnt); hypre_TFree(cindex); /*----------------------------------------------------------------- * Extract data from IJ matrix *-----------------------------------------------------------------*/ HYPRE_IJMatrixGetValues(ij_A, nrows, ncols, rows, cols, vals); hypre_TFree(ncols); hypre_TFree(rows); hypre_TFree(cols); /*----------------------------------------------------------------- * Steps 4 & 5: * Compute the arithmetically averaged interface stencils, * and determine the interface stencil weights. * * stencil_vals[l] = averaged stencil coeff for interface * stencil entry l. * common_rank_stencils = final structured coarse stencil entries * for the stencil_shapes that the * interface stencils must collapse to. * common_stencil_ranks = final structured coarse stencil_shape * ranks for the stencil_shapes that the * interface stencils must collapse to. * common_stencil_i = stencil entry of the interface stencil * corresponding to the common * stencil_shape. 
*-----------------------------------------------------------------*/ k= 0; for (i= 0; i< cnt1; i++) { stencil_vals= hypre_CTAlloc(HYPRE_Real, coarse_stencil_cnt[i]); /*----------------------------------------------------------------- * Compute the arithmetic stencil averages for coarse node i. *-----------------------------------------------------------------*/ for (j= 0; j< box_to_ranks_cnt[i]; j++) { m= interface_max_stencil_ranks[i][j]; l= interface_rank_stencils[i][m]; stencil_vals[l]+= vals[k]/interface_max_stencil_cnt[i][m]; k++; } hypre_TFree(interface_max_stencil_ranks[i]); hypre_TFree(interface_max_stencil_cnt[i]); hypre_TFree(interface_rank_stencils[i]); /*----------------------------------------------------------------- * Determine which stencil has to be formed. This is accomplished * by comparing the coarse grid stencil ranks with the computed * interface stencil ranks. We qsort (if there are more than one * rank) the ranks to give quick comparisons. Note that we need * to swap the elements of stencil_vals & fine_interface_ranks[i]'s * accordingly. *-----------------------------------------------------------------*/ sort= falseV; for (j= 0; j< (coarse_stencil_cnt[i]-1); j++) { if (interface_stencil_ranks[i][j] > interface_stencil_ranks[i][j+1]) { sort= trueV; break; } } if ( (coarse_stencil_cnt[i]>1) && (sort==trueV) ) { temp1= hypre_TAlloc(HYPRE_Int, coarse_stencil_cnt[i]); for (j= 0; j< coarse_stencil_cnt[i]; j++) { temp1[j]= j; } hypre_qsort1(interface_stencil_ranks[i], (HYPRE_Real *) temp1, 0, coarse_stencil_cnt[i]-1); /*--------------------------------------------------------------- * swap the stencil_vals to agree with the rank swapping. 
*---------------------------------------------------------------*/ temp3 = hypre_TAlloc(HYPRE_Real, coarse_stencil_cnt[i]); for (j=0; j< coarse_stencil_cnt[i]; j++) { m = temp1[j]; temp3[j] = stencil_vals[m]; } for (j=0; j< coarse_stencil_cnt[i]; j++) { stencil_vals[j]= temp3[j]; } hypre_TFree(temp1); hypre_TFree(temp3); } /*----------------------------------------------------------------- * Compute the weights for the averaged stencil contributions. * We need to convert the ranks back to stencil_shapes and then * find the abs of the stencil shape. *-----------------------------------------------------------------*/ temp3= hypre_TAlloc(HYPRE_Real, coarse_stencil_cnt[i]); for (j=0; j< coarse_stencil_cnt[i]; j++) { InverseMapStencilRank(interface_stencil_ranks[i][j], index_temp); AbsStencilShape(index_temp, abs_stencil_shape); temp3[j]= weights[abs_stencil_shape]; } /*----------------------------------------------------------------- * Compare the coarse stencil and the interface stencil and * extract the common stencil shapes. * WE ARE ASSUMING THAT THE COARSE INTERFACE STENCIL HAS SOME * COMMON STENCIL SHAPE WITH THE COARSE STENCIL. *-----------------------------------------------------------------*/ common_rank_stencils= hypre_TAlloc(HYPRE_Int, stencil_size); common_stencil_ranks= hypre_TAlloc(HYPRE_Int, stencil_size); common_stencil_i = hypre_TAlloc(HYPRE_Int, stencil_size); l= 0; m= 0; for (j= 0; j< stencil_size; j++) { while( (l < coarse_stencil_cnt[i]) && (stencil_ranks[j] > interface_stencil_ranks[i][l]) ) { l++; } if (l >= coarse_stencil_cnt[i]) { break; } /*-------------------------------------------------------------- * Check if a common stencil shape rank has been found. 
*--------------------------------------------------------------*/ if ( (stencil_ranks[j] == interface_stencil_ranks[i][l]) && (l < coarse_stencil_cnt[i]) ) { common_rank_stencils[m]= rank_stencils[ stencil_ranks[j] ]; common_stencil_ranks[m]= stencil_ranks[j]; common_stencil_i[m++] = l; l++; } } /*----------------------------------------------------------------- * Find the contribution and weights for the averaged stencils. *-----------------------------------------------------------------*/ for (j= 0; j< m; j++) { hypre_CopyIndex(hypre_StructStencilElement( stencils, common_rank_stencils[j]), stencil_shape_i); AbsStencilShape(stencil_shape_i, abs_stencil_shape); crse_ptr= hypre_StructMatrixExtractPointerByIndex(crse_smatrix, parents_cnodes[i], stencil_shape_i); /*----------------------------------------------------------------- * For a compact stencil (e.g., -1 <= hypre_Index[i] <= 1, i= 0-2), * the value of abs_stencil_shape can be used to determine the * stencil: * abs_stencil_shape= 3 only corners in 3-d * 2 corners in 2-d; or the centre plane * in 3-d, or e,w,n,s of the bottom * or top plane in 3-d * 1 e,w in 1-d; or e,w,n,s in 2-d; * or the centre plane in 3-d, * or c of the bottom or top plane * in 3-d * 0 c in 1-d, 2-d, or 3-d. *-----------------------------------------------------------------*/ switch(abs_stencil_shape) { case 3: /* corners of 3-d stencil */ l= common_stencil_i[j]; crse_ptr[fine_interface_ranks[i]]= stencil_vals[l]; break; case 2: /* corners in 2-d or edges in 3-d */ if (ndim ==2) { l= common_stencil_i[j]; crse_ptr[fine_interface_ranks[i]]= stencil_vals[l]; } else if (ndim == 3) { /*---------------------------------------------------------- * The edge values are weighted sums of the averaged * coefficients. The weights and averaged coefficients must * be found. 
The contributions are found using the stencil * ranks and the stencil ordering * top: 14 12 13 centre: 5 3 4 bottom 23 21 22 * 11 9 10 2 0 1 20 18 19 * 17 15 16 8 6 7 26 24 25 *----------------------------------------------------------*/ l = common_stencil_ranks[j]; temp1= hypre_TAlloc(HYPRE_Int, 2); switch(l) { case 4: /* centre plane ne */ temp1[0]= 13; temp1[1]= 22; break; case 5: /* centre plane nw */ temp1[0]= 14; temp1[1]= 23; break; case 7: /* centre plane se */ temp1[0]= 16; temp1[1]= 25; break; case 8: /* centre plane sw */ temp1[0]= 17; temp1[1]= 26; break; case 10: /* top plane e */ temp1[0]= 13; temp1[1]= 16; break; case 11: /* top plane w */ temp1[0]= 14; temp1[1]= 17; break; case 12: /* top plane n */ temp1[0]= 13; temp1[1]= 14; break; case 15: /* top plane s */ temp1[0]= 16; temp1[1]= 17; break; case 19: /* bottom plane e */ temp1[0]= 22; temp1[1]= 25; break; case 20: /* bottom plane w */ temp1[0]= 23; temp1[1]= 26; break; case 21: /* bottom plane n */ temp1[0]= 22; temp1[1]= 23; break; case 24: /* bottom plane s */ temp1[0]= 25; temp1[1]= 26; break; } /*------------------------------------------------------- * Add up the weighted contributions of the interface * stencils. This involves searching the ranks of * interface_stencil_ranks. The weights must be averaged. 
*-------------------------------------------------------*/ l= common_stencil_i[j]; sum= temp3[l]; sum_contrib= sum*stencil_vals[l]; n= 1; for (l= 0; l< 2; l++) { while ( (n < coarse_stencil_cnt[i]) &&(interface_stencil_ranks[i][n] < temp1[l]) ) { n++; } if (n >= coarse_stencil_cnt[i]) { break; } if (interface_stencil_ranks[i][n] == temp1[l]) { sum+= temp3[n]; sum_contrib+= temp3[n]*stencil_vals[n]; n++; } } sum_contrib/= sum; /* average out the weights */ l= common_stencil_i[j]; crse_ptr[fine_interface_ranks[i]]= sum_contrib; hypre_TFree(temp1); } /* else if (ndim == 3) */ break; case 1: /* e,w in 1-d, or edges in 2-d, or faces in 3-d */ if (ndim == 1) { l= common_stencil_i[j]; crse_ptr[fine_interface_ranks[i]]= stencil_vals[l]; } else if (ndim == 2) { l = common_stencil_ranks[j]; temp1= hypre_TAlloc(HYPRE_Int, 2); switch(l) { case 1: /* e */ temp1[0]= 4; temp1[1]= 7; break; case 2: /* w */ temp1[0]= 5; temp1[1]= 8; break; case 3: /* n */ temp1[0]= 4; temp1[1]= 5; break; case 6: /* s */ temp1[0]= 7; temp1[1]= 8; break; } /*------------------------------------------------------- * Add up the weighted contributions of the interface * stencils. 
*-------------------------------------------------------*/ l= common_stencil_i[j]; sum= temp3[l]; sum_contrib= sum*stencil_vals[l]; n= 1; for (l= 0; l< 2; l++) { while ( (n < coarse_stencil_cnt[i]) &&(interface_stencil_ranks[i][n] < temp1[l]) ) { n++; } if (n >= coarse_stencil_cnt[i]) { break; } if (interface_stencil_ranks[i][n] == temp1[l]) { sum+= temp3[n]; sum_contrib+= temp3[n]*stencil_vals[n]; n++; } } sum_contrib/= sum; /* average out the weights */ l= common_stencil_i[j]; crse_ptr[fine_interface_ranks[i]]= sum_contrib; hypre_TFree(temp1); } /* else if (ndim == 2) */ else /* 3-d */ { l = common_stencil_ranks[j]; temp1= hypre_TAlloc(HYPRE_Int, 8); switch(l) { case 1: /* centre plane e */ temp1[0]= 4; temp1[1]= 7; temp1[2]= 10; temp1[3]= 13; temp1[4]= 16; temp1[5]= 19; temp1[6]= 22; temp1[7]= 25; break; case 2: /* centre plane w */ temp1[0]= 5; temp1[1]= 8; temp1[2]= 11; temp1[3]= 14; temp1[4]= 17; temp1[5]= 20; temp1[6]= 23; temp1[7]= 26; break; case 3: /* centre plane n */ temp1[0]= 4; temp1[1]= 5; temp1[2]= 12; temp1[3]= 13; temp1[4]= 14; temp1[5]= 21; temp1[6]= 22; temp1[7]= 23; break; case 6: /* centre plane s */ temp1[0]= 7; temp1[1]= 8; temp1[2]= 15; temp1[3]= 16; temp1[4]= 17; temp1[5]= 24; temp1[6]= 25; temp1[7]= 26; break; case 9: /* top plane c */ for (n= 0; n< 8; n++) { temp1[n]= 10+n; } break; case 18: /* bottom plane c */ for (n= 0; n< 8; n++) { temp1[n]= 19+n; } break; } /*------------------------------------------------------- * Add up the weighted contributions of the interface * stencils. 
*-------------------------------------------------------*/ l= common_stencil_i[j]; sum= temp3[l]; sum_contrib= sum*stencil_vals[l]; n= 1; for (l= 0; l< 8; l++) { while ( (n < coarse_stencil_cnt[i]) && (interface_stencil_ranks[i][n] < temp1[l]) ) { n++; } if (n >= coarse_stencil_cnt[i]) { break; } if (interface_stencil_ranks[i][n] == temp1[l]) { sum+= temp3[n]; sum_contrib+= temp3[n]*stencil_vals[n]; n++; } } sum_contrib/= sum; /* average out the weights */ l= common_stencil_i[j]; crse_ptr[fine_interface_ranks[i]]= sum_contrib; hypre_TFree(temp1); } /* else */ break; } /* switch(abs_stencil_shape) */ } /* for (j= 0; j< m; j++) */ hypre_TFree(interface_stencil_ranks[i]); hypre_TFree(stencil_vals); hypre_TFree(temp3); hypre_TFree(common_rank_stencils); hypre_TFree(common_stencil_ranks); hypre_TFree(common_stencil_ranks); hypre_TFree(common_stencil_i); } /* for (i= 0; i< cnt1; i++) */ hypre_TFree(box_to_ranks_cnt); hypre_TFree(interface_max_stencil_ranks); hypre_TFree(interface_max_stencil_cnt); hypre_TFree(interface_rank_stencils); hypre_TFree(interface_stencil_ranks); hypre_TFree(coarse_stencil_cnt); hypre_TFree(fine_interface_ranks); hypre_TFree(parents_cnodes); hypre_TFree(vals); /*----------------------------------------------------------- * Box fi is completed. *-----------------------------------------------------------*/ } /* for (fi= 0; fi< box_array_size; fi++) */ hypre_TFree(stencil_ranks); hypre_TFree(rank_stencils); hypre_TFree(cdata_space_ranks); hypre_TFree(box_graph_cnts); for (i= 0; i< box_array_size; i++) { if (box_graph_indices[i]) hypre_TFree(box_graph_indices[i]); } hypre_TFree(box_graph_indices); hypre_TFree(box_starts); hypre_TFree(box_ends); } /* for (var1= 0; var1< nvars; var1++) */ } /* if (nUventries > 0) */ /*-------------------------------------------------------------------------- * STEP 3: * Coarsened f/c interface coefficients can be used to create the * centre components along the coarsened f/c nodes now. 
Loop over * the coarsened fbox_bdy's and set the centre stencils. *--------------------------------------------------------------------------*/ hypre_ClearIndex(index_temp); for (var1= 0; var1< nvars; var1++) { /* only like variables couple. */ smatrix_var = hypre_SStructPMatrixSMatrix(A_crse, var1, var1); stencils = hypre_SStructPMatrixSStencil(A_crse, var1, var1); stencil_size = hypre_StructStencilSize(stencils); a_ptrs = hypre_TAlloc(HYPRE_Real *, stencil_size); rank_stencils= hypre_TAlloc(HYPRE_Int, max_stencil_size); for (i= 0; i< stencil_size; i++) { hypre_CopyIndex(hypre_StructStencilElement(stencils, i), stencil_shape_i); MapStencilRank(stencil_shape_i, rank); rank_stencils[rank]= i; } centre= rank_stencils[0]; cgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_crse), var1); cgrid_boxes= hypre_StructGridBoxes(cgrid); hypre_ForBoxI(ci, cgrid_boxes) { A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(smatrix_var), ci); fbox_bdy_ci= fbox_bdy[var1][ci]; for (i= 0; i< stencil_size; i++) { hypre_CopyIndex(hypre_StructStencilElement(stencils, i), stencil_shape_i); a_ptrs[i]= hypre_StructMatrixExtractPointerByIndex(smatrix_var, ci, stencil_shape_i); } /*------------------------------------------------------------------ * Loop over the boundaries of each patch inside cgrid_box ci. * These patch boxes must be coarsened to get the correct extents. 
*------------------------------------------------------------------*/ hypre_ForBoxArrayI(arrayi, fbox_bdy_ci) { fbox_bdy_ci_fi= hypre_BoxArrayArrayBoxArray(fbox_bdy_ci, arrayi); hypre_ForBoxI(fi, fbox_bdy_ci_fi) { fgrid_box= hypre_BoxArrayBox(fbox_bdy_ci_fi, fi); hypre_StructMapFineToCoarse(hypre_BoxIMin(fgrid_box), index_temp, stridef, hypre_BoxIMin(&fine_box)); hypre_StructMapFineToCoarse(hypre_BoxIMax(fgrid_box), index_temp, stridef, hypre_BoxIMax(&fine_box)); hypre_CopyIndex(hypre_BoxIMin(&fine_box), cstart); hypre_BoxGetSize(&fine_box, loop_size); hypre_BoxLoop1Begin(ndim, loop_size, A_dbox, cstart, stridec, iA); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iA,i) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop1For(iA) { for (i= 0; i< stencil_size; i++) { if (i != centre) { a_ptrs[centre][iA]-= a_ptrs[i][iA]; } } } hypre_BoxLoop1End(iA); } /* hypre_ForBoxI(fi, fbox_bdy_ci_fi) */ } /* hypre_ForBoxArrayI(arrayi, fbox_bdy_ci) */ } /* hypre_ForBoxI(ci, cgrid_boxes) */ hypre_TFree(a_ptrs); hypre_TFree(rank_stencils); } /* for (var1= 0; var1< nvars; var1++) */ for (var1= 0; var1< nvars; var1++) { cgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_crse), var1); cgrid_boxes= hypre_StructGridBoxes(cgrid); fgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_pmatrix), var1); fgrid_boxes= hypre_StructGridBoxes(fgrid); hypre_ForBoxI(ci, cgrid_boxes) { hypre_BoxArrayDestroy(fgrid_crse_extents[var1][ci]); hypre_BoxArrayDestroy(fbox_interior[var1][ci]); hypre_BoxArrayArrayDestroy(fbox_bdy[var1][ci]); hypre_TFree(interior_fboxi[var1][ci]); hypre_TFree(bdy_fboxi[var1][ci]); } hypre_TFree(fgrid_crse_extents[var1]); hypre_TFree(fbox_interior[var1]); hypre_TFree(fbox_bdy[var1]); hypre_TFree(interior_fboxi[var1]); hypre_TFree(bdy_fboxi[var1]); hypre_ForBoxI(fi, fgrid_boxes) { hypre_TFree(cboxi_fboxes[var1][fi]); } hypre_TFree(cboxi_fboxes[var1]); hypre_TFree(cboxi_fcnt[var1]); } hypre_TFree(fgrid_crse_extents); hypre_TFree(fbox_interior); 
hypre_TFree(fbox_bdy); hypre_TFree(interior_fboxi); hypre_TFree(bdy_fboxi); hypre_TFree(cboxi_fboxes); hypre_TFree(cboxi_fcnt); return 0; }
Error_Analysis.h
/*************************************************************************** * Copyright (C) 2009-2012 by Florian Goth * * fgoth@wthp095 * * * * All rights reserved. * * * * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * * * Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. * * * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* ***************************************************************************/ #ifndef ERROR_ANALYSIS_H #define ERROR_ANALYSIS_H #include <string> #include <fstream> #include <type_traits> #include "AverageSign.h" #include "errordata.h" #include "jackknife.h" #include "ClientState.h" #include <complex> #include <valarray> #include <algorithm> #include <unistd.h> /* //forward declare Observable template <class ObsT, class SignType> class Observable;*/ //let's carve in stone that the sign is always stored last... template <typename T> class ReFunction { public: inline T operator()(std::complex<T>* dat) { return (dat[0].real()*dat[len-1].real() + dat[0].imag()*dat[len-1].imag())/(dat[len-1].real()*dat[len-1].real() + dat[len-1].imag()*dat[len-1].imag()); } typedef T res_t; ReFunction(uint l) : len(l) {} private: uint len; }; template <typename T> class ImFunction { public: inline T operator() (std::complex<T>* dat) { return (dat[0].imag() * dat[len-1].real() - dat[len-1].imag()*dat[0].real())/(dat[len-1].real()*dat[len-1].real() + dat[len-1].imag() * dat[len-1].imag()); } typedef T res_t; ImFunction(uint l) : len(l) {} private: uint len; }; template <typename T> class ReVecFunction { public: inline std::valarray<T> operator()(std::valarray<std::complex<T> >& dat) { auto denom = dat[len-1].real()*dat[len-1].real() + dat[len-1].imag()*dat[len-1].imag(); std::valarray<T> retval(len); for(uint k = 0; k < (len - 1); ++k) { retval[k] = (dat[k].real()*dat[len-1].real() + dat[k].imag()*dat[len-1].imag())/denom; } return retval; } typedef std::valarray<T> res_t; ReVecFunction(uint l) : len(l) {} private: uint len; }; template <typename T> class ImVecFunction { public: inline std::valarray<T> operator() (std::valarray<std::complex<T> >& dat) { auto denom = dat[len-1].real()*dat[len-1].real() + dat[len-1].imag()*dat[len-1].imag(); std::valarray<T> retval(len); for(uint k = 0; k < (len - 1); ++k) retval[k] = (dat[k].imag() * dat[len-1].real() - 
dat[len-1].imag()*dat[k].real())/denom; return retval; } typedef std::valarray<T> res_t; ImVecFunction(uint l) : len(l) {} private: uint len; }; template <typename T> class PlainSign { public: inline T operator() (T* dat) { return dat[0]/dat[len-1]; } typedef T res_t; PlainSign(uint l) : len(l) {} private: uint len; }; template <typename T> class PlainVecSign { public: inline T operator() (T* dat) { std::valarray<T> retval(len); for(uint k = 0; k < len-1; ++k) retval[k] = dat[k]/dat[len-1]; return retval; } inline std::valarray<T> operator() (std::valarray<T>& dat) { std::valarray<T> retval(len); for(uint k = 0; k < (len-1); ++k) retval[k] = dat[k]/dat[len-1]; return retval; } typedef std::valarray<T> res_t; PlainVecSign(uint l) : len(l) {} private: uint len; }; template <typename T> class PlainSign<std::complex<T> > {//this is supposed to be used if we mix a complex observable and a real sign public: inline std::complex<T> operator() (std::complex<T>* dat) { return dat[0]/real(dat[len-1]); } typedef std::complex<T> res_t; PlainSign< std::complex< T > >(uint l) : len(l) {} private: uint len; }; template <typename T> class PlainVecSign<std::complex<T> > {//this is supposed to be used if we mix a complex observable and a real sign public: inline std::valarray<std::complex<T> > operator() (std::complex<T>* dat) { std::valarray<std::complex<T> > retval(len); for(uint k = 0; k < len-1; ++k) retval[k] = dat[k]/ dat[len-1]; return retval; } typedef std::valarray<std::complex<T> > res_t; PlainVecSign< std::complex< T > >(uint l) : len(l) {} private: uint len; }; template <typename FPType> inline mc_analysis::errordata<std::complex<FPType> > handlesign(FileVector<std::complex<FPType> >& a, FileVector<std::complex<FPType> >& b) { typedef std::complex<FPType> SignType; std::valarray<std::complex<FPType> > cont[2]; cont[0].resize(a.size()); cont[1].resize(b.size()); for (std::size_t k = 0; k < a.size(); ++k) { cont[0][k] = a[k]; cont[1][k] = b[k]; } 
mc_analysis::errordata<SignType> ed = mc_analysis::jackknife(ReFunction<FPType>(2), cont, 2); mc_analysis::errordata<SignType> edim = mc_analysis::jackknife(ImFunction<FPType>(2), cont, 2); return mc_analysis::errordata<std::complex<FPType> >(std::complex<FPType>(ed.get_mean().real(), edim.get_mean().real()), std::complex<FPType>(ed.get_error().real(), edim.get_error().real()), std::complex<FPType>(ed.get_bias().real(), edim.get_bias().real())); } template <typename FPType> inline mc_analysis::errordata<std::complex<FPType> > handlesign(FileVector<std::complex<FPType> >& a, std::valarray<std::complex<FPType> >& b) { typedef std::complex<FPType> SignType; std::valarray<std::complex<FPType> > cont[2]; cont[0].resize(a.size()); cont[1].resize(b.size()); for (typename FileVector<std::complex<FPType> >::size_type k = 0; k < a.size(); ++k) { cont[0][k] = a[k]; cont[1][k] = b[k]; } mc_analysis::errordata<typename ReFunction<FPType>::res_t> ed = mc_analysis::jackknife(ReFunction<FPType>(2), (&cont[0]), 2); mc_analysis::errordata<typename ImFunction<FPType>::res_t> edim = mc_analysis::jackknife(ImFunction<FPType>(2), (&cont[0]), 2); return mc_analysis::errordata<std::complex<FPType> >(std::complex<FPType>(ed.get_mean(), edim.get_mean()), std::complex<FPType>(ed.get_error(), edim.get_error()), std::complex<FPType>(ed.get_bias(), edim.get_bias())); } template <typename FPType> inline mc_analysis::errordata<std::complex<FPType> > handlesign(FileVector<std::complex<FPType> >& a, std::valarray<FPType>& b) {//the twist is here that the observable is a complex quantity whereas the sign is real typedef FPType SignType; std::valarray<std::complex<FPType> > cont[2]; cont[0].resize(a.size()); cont[1].resize(b.size()); for (typename FileVector<std::complex<FPType> >::size_type k = 0; k < a.size(); ++k) { cont[0][k] = a[k]; cont[1][k] = b[k]; } return mc_analysis::jackknife(PlainSign<std::complex<FPType> >(2), cont, 2); } template <typename FPType> 
mc_analysis::errordata<std::complex<FPType> > handlesign(const std::valarray<std::complex<FPType> >& a, const std::valarray<FPType>& b) { typedef std::complex<FPType> ObsType; std::valarray<ObsType> cont[2]; cont[0].resize(a.size()); cont[1].resize(b.size()); for (std::size_t k = 0; k < a.size(); ++k) { cont[0][k] = a[k]; cont[1][k] = b[k]; } return mc_analysis::jackknife (PlainSign<ObsType>(2), cont, 2); } template <typename FPType> mc_analysis::errordata<FPType> handlesign(const std::valarray<FPType>& a, const std::valarray<FPType>& b) { typedef FPType SignType; const std::valarray<SignType> *const cont[2] = {&a, &b}; return mc_analysis::jackknife (PlainSign<FPType>(2), cont, 2); } template <typename FPType> inline mc_analysis::errordata<FPType> handlesign(FileVector<FPType>& a, std::valarray<FPType>& b) { typedef FPType SignType; std::valarray<FPType> cont[2]; cont[0].resize(a.size()); cont[1].resize(b.size()); for (typename FileVector<FPType>::size_type k = 0; k < a.size(); ++k) { cont[0][k] = a[k]; cont[1][k] = b[k]; } return mc_analysis::jackknife(PlainSign<FPType>(2), cont, 2); } template <typename FPType> mc_analysis::errordata<std::complex<FPType> > handlesign(const std::valarray<std::complex<FPType> >& a, const std::valarray<std::complex<FPType> >& b) { const std::valarray<std::complex<FPType> > *const cont[2] = {&a, &b}; mc_analysis::errordata<typename ReFunction<FPType>::res_t> ed = mc_analysis::jackknife (ReFunction<FPType>(2), cont, 2); mc_analysis::errordata<typename ImFunction<FPType>::res_t> edim = mc_analysis::jackknife(ImFunction<FPType>(2), cont, 2); return mc_analysis::errordata<std::complex<FPType> >(std::complex<FPType>(ed.get_mean(), edim.get_mean()), std::complex<FPType>(ed.get_error(), edim.get_error()), std::complex<FPType>(ed.get_bias(), edim.get_bias())); } template <typename FPType> mc_analysis::errordata<std::valarray<std::complex<FPType> > > handlesign(const std::valarray<std::valarray<std::complex<FPType> > >& a, const 
std::valarray<FPType>& b) { std::valarray<std::complex<FPType> > sign(b.size()); for(uint k = 0; k < b.size(); ++k) sign[k] = b[k]; const std::valarray<std::complex<FPType> > ** cont = new const std::valarray<std::complex<FPType> > *[a.size() + 1]; for(uint k = 0; k < a.size(); ++k) cont[k] = &(a[k]); cont[a.size()] = &sign; mc_analysis::errordata<typename PlainVecSign<std::complex<FPType> >::res_t> ed = mc_analysis::jackknife(PlainVecSign<std::complex<FPType> >(a.size() + 1), cont, a.size() + 1); return ed; } template <typename FPType> mc_analysis::errordata<std::valarray<std::complex<FPType> > > handlesign(const std::valarray<std::valarray<std::complex<FPType> > >& a, bool covariance) { uint funnr = a[0].size(); mc_analysis::errordata<typename ReVecFunction<FPType>::res_t> ed = mc_analysis::vecjackknife(ReVecFunction<FPType>(funnr), a, funnr, covariance); mc_analysis::errordata<typename ImVecFunction<FPType>::res_t> edim = mc_analysis::vecjackknife(ImVecFunction<FPType >(funnr), a, funnr, covariance); std::valarray<std::complex<FPType> > retval(funnr-1); std::valarray<std::complex<FPType> > retvalerr(funnr-1); std::valarray<std::complex<FPType> > retvalbias(funnr-1); std::valarray<std::complex<FPType> > retvalcov((funnr - 1) * (funnr - 1)); for(uint y = 0; y < (funnr-1); ++y) { retval[y] = std::complex<FPType>(ed.get_mean()[y], edim.get_mean()[y]); retvalerr[y] = std::complex<FPType>(ed.get_error()[y], edim.get_error()[y]); retvalbias[y] = std::complex<FPType>(ed.get_bias()[y], edim.get_bias()[y]); if(covariance) { for(uint j = 0; j < (funnr - 1); ++j) { retvalcov[y*(funnr - 1) + j] = std::complex<FPType>(ed.getCov()[y*funnr + j], edim.getCov()[y*funnr + j]); } } } mc_analysis::errordata<std::valarray<std::complex<FPType> > > finalretval(retval, retvalerr, retvalbias); if(covariance) finalretval.setCov(retvalcov); return finalretval; } /** * This handlesign deals with the preparation of the covariance in the case of a real function. 
* The array "a" contains in the last row the sign. * @param a an array containing the function and in the last index the average sign * @param covariance Shall we calculate a covariance matrix * @return errors and covariances * */ template <typename FPType> mc_analysis::errordata<std::valarray<FPType> > handlesign(const std::valarray<std::valarray<FPType> >& a, bool covariance) { uint funnr = a[0].size(); mc_analysis::errordata<typename PlainVecSign<FPType>::res_t> ed = mc_analysis::vecjackknife(PlainVecSign<FPType>(funnr), a, funnr, covariance); std::valarray<FPType> retval(funnr-1); std::valarray<FPType> retvalerr(funnr-1); std::valarray<FPType> retvalbias(funnr-1); std::valarray<FPType> retvalcov((funnr - 1) * (funnr - 1)); for(uint y = 0; y < (funnr-1); ++y) { retval[y] = ed.get_mean()[y]; retvalerr[y] = ed.get_error()[y]; retvalbias[y] = ed.get_bias()[y]; if(covariance) { for(uint j = 0; j < (funnr - 1); ++j) { retvalcov[y*(funnr - 1) + j] = ed.getCov()[y*funnr + j]; } } } mc_analysis::errordata<std::valarray<FPType> > finalretval(retval, retvalerr, retvalbias); if(covariance) finalretval.setCov(retvalcov); return finalretval; } template <typename FPType> mc_analysis::errordata<std::valarray<std::complex<FPType> > > handlesign(const FileVector<std::valarray<std::complex<FPType> > >& a, const std::valarray<std::complex<FPType> >& b) { const std::valarray<std::complex<FPType> > *const* cont = new const std::valarray<std::complex<FPType> > *const[a.size() + 1]; for(uint k = 0; k < a.size(); ++k) cont[k] = &(a[k]); cont[a.size()] = &b; mc_analysis::errordata<typename ReVecFunction<FPType>::res_t> ed = mc_analysis::jackknife (ReFunction<FPType>(a.size() + 1), cont, a.size() + 1); mc_analysis::errordata<typename ImVecFunction<FPType>::res_t> edim = mc_analysis::jackknife(ImFunction<FPType>(a.size() + 1), cont, a.size() + 1); std::valarray<std::complex<FPType> > retval(a.size()); std::valarray<std::complex<FPType> > retvalerr(a.size()); 
std::valarray<std::complex<FPType> > retvalbias(a.size()); for(uint k = 0; k < a.size(); ++k) { retval[k] = std::complex<FPType>(ed.get_mean()[k], edim.get_mean()[k]); retvalerr[k] = std::complex<FPType>(ed.get_error()[k], edim.get_error()[k]); retvalbias[k] = std::complex<FPType>(ed.get_bias()[k], edim.get_bias()[k]); } return mc_analysis::errordata<std::complex<FPType> >(retval, retvalerr, retvalbias); } template<class ObsType, typename SignType> class Error_Analysis { public: /** @param p the path to which we store @param obs the observable @param avsign the AverageSign used for determining the value of the observable */ static void analyze(const std::string& p, Observable<ObsType, SignType>& obs, AverageSign<SignType>& avsign); private: }; template<class ObsType, typename SignType> void Error_Analysis<ObsType, SignType>::analyze(const std::string& p, Observable<ObsType, SignType>& obs, AverageSign<SignType>& avsign) { std::ofstream out((p + obs.myname).c_str()); mc_analysis::errordata<ObsType> ed = handlesign<typename TypetoFPType<SignType>::Ret >(obs.fv, avsign.signcache); ObsType mean = ed.get_mean(); ObsType err = ed.get_error(); out<<mean<<" +- "<<err<<" Bias: "<<ed.get_bias()<<std::endl; } template<class T, typename SignType> class Error_Analysis<std::valarray<T>, SignType> { public: /** @param p the path to which we store @param obs the Observable @param avsign the AverageSign used for determining the value of the observable */ static void analyze(const std::string& p, Observable<std::valarray<T>, SignType>& obs, AverageSign<SignType>& avsign); private: }; template <typename FPType> struct FPIndex { static inline FPType call(uint j, FPType delta_s ) {return j * delta_s;} }; template <typename FPType> struct IntIndex { static inline FPType call(uint j, FPType) {return j;} }; /** @param fileRe the file to which we write the realpart @param fileIm the file to which we write the imaginarypart @param slices the number of points the function has @param delta_s 
the resolution of the function */ template <class Cont, class SignType, class FPType, class IndexTrait> inline void writeFunctiontoFile(Cont& container, const valarray<SignType>& avs, std::ofstream& fileRe, std::ofstream& fileIm, unsigned int functionpoints, FPType delta_s, IndexTrait indextrait) {//Cont is usually a file vector typedef typename Cont::value_type::value_type ScalarType; unsigned int nroffunctions = container.size();//get nr. of functions std::valarray<ScalarType> temp(nroffunctions);//allocate a valarray for one function for (unsigned int j = 0; j < functionpoints; ++j)//for all functionpoints { for (unsigned int k = 0; k < nroffunctions; ++k)//copy function points temp[k] = container[k][j]; mc_analysis::errordata<ScalarType> edp(handlesign(temp, avs));//do sign - analysis //output data FPType idx = IndexTrait::call(j, delta_s); fileRe<<idx<<" "<<real(edp.get_mean())<<" "<<real(edp.get_error())<<std::endl; fileIm<<idx<<" "<<imag(edp.get_mean())<<" "<<imag(edp.get_error())<<std::endl; } return; } /** @param file the file to which we write the realpart @param slices the number of points the function has @param delta_s the resolution of the function */ template <class Cont, class FPType> inline void writeFunctiontoFile(Cont& container, const valarray<FPType>& avs, std::ofstream& file, unsigned int functionpoints, FPType delta_s) { unsigned int nroffunctions = container.size();//get nr. 
of functions std::valarray<FPType> temp(nroffunctions);//allocate a valarray for one function for (unsigned int j = 0; j < functionpoints; ++j)//for all functionpoints { for (unsigned int k = 0; k < nroffunctions; ++k)//copy function points temp[k] = container[k][j]; mc_analysis::errordata<FPType> edp(handlesign(temp, avs));//do sign - analysis //output data file<<j * delta_s<<" "<<edp.get_mean()<<" "<<edp.get_error()<<std::endl; } return; } template <class FPType> struct Point { Point(FPType a, unsigned int b, FPType c, FPType d) : t(a), x(b), y(c), dy(d) {} FPType t; unsigned int x; FPType y,dy; inline bool operator<(const Point& rhs) const { if (t >= rhs.t) { if (t == rhs.t) { if (x >= rhs.x) { return false; } return true; } return false; } return true; } }; /** * This gets called for every complex Function, e.g. a complex Green's function @param fileRe the file to which we write the realpart @param fileIm the file to which we write the imaginarypart @param len the length of the Vector @param slices the number of points the function has @param delta_s the resolution of the function */ template <class Cont, class SignType, typename FPType> static inline void writeVectorFunctiontoFile(Cont& container, unsigned int len, const valarray<SignType>& avsign, std::string& nameRe, std::string& nameIm, unsigned int slices, FPType delta_s, bool covariance) { std::ofstream fileRe(nameRe.c_str()); std::ofstream fileIm(nameIm.c_str()); std::ofstream covfileRe((nameRe+"Cov").c_str()); std::ofstream covfileIm((nameIm+"Cov").c_str()); //the template parameter Cont is usually a filevector typedef typename Cont::value_type::value_type::value_type ScalarType; std::ofstream xdep[5]; for (unsigned int i = 0; i < 5; ++i) { unsigned int dt = 2 << i; xdep[i].open((nameRe + "_" + toString(dt)).c_str()); } container.sync(); std::vector<Point<FPType> > points; points.reserve(4*slices); unsigned int nroffunctions = container.size();//get nr. 
of bins std::valarray<std::valarray<ScalarType> > temp(nroffunctions + 1);//allocate a valarray for one function for(uint j = 0; j < (nroffunctions+1); ++j) temp[j].resize(slices+1); for(uint k = 0; k < len; ++k) // for every point { for (uint j = 0; j < nroffunctions; ++j) //get every bin { auto ret = container(j, k); for(uint i = 0; i < ret.size(); ++i) temp[j][i] = ret[i]; temp[j][ret.size()] = avsign[j];//we copy the sign bins into the last index } mc_analysis::errordata<std::valarray<ScalarType> > edp(handlesign(temp, covariance));//do sign - analysis for(uint l = 0; l < slices; ++l) { //output data points.push_back(Point<FPType>(l * delta_s, k, real(edp.get_mean()[l]), real(edp.get_error()[l]))); fileRe<<l * delta_s<<" "<<real(edp.get_mean()[l])<<" "<<real(edp.get_error()[l])<<std::endl; fileIm<<l * delta_s<<" "<<imag(edp.get_mean()[l])<<" "<<imag(edp.get_error()[l])<<std::endl; } //output xmgrace separators fileRe<<"&"<<std::endl; fileIm<<"&"<<std::endl; if(covariance) { for(uint k = 0; k < slices; ++k) { for(uint j = 0; j < slices; ++j) { covfileRe<<real(edp.getCov()[k*slices + j])<<" "; covfileIm<<imag(edp.getCov()[k*slices + j])<<" "; } covfileRe<<std::endl; covfileIm<<std::endl; } } } // for (unsigned int k = 0; k < len; ++k)//for every function that we stored // { // for (unsigned int j = 0; j < slices; ++j)//for all functionpoints // { // for (unsigned int l = 0; l < nroffunctions; ++l)//copy function points // temp[l] = container(l,k)[j]; // mc_analysis::errordata<FPType> edp(handlesign(temp, avsign));//do sign - analysis // //output data // points.push_back(Point<FPType>(j * delta_s, k, edp.get_mean(), edp.get_error())); // file<<j * delta_s<<" "<<edp.get_mean()<<" "<<edp.get_error()<<std::endl; // } // //output xmgrace separators // file<<"&"<<std::endl; // } //now let's generate the transformed data sort(points.begin(), points.end()); #pragma omp parallel for for(size_t i = 0; i < 5; ++i) { unsigned int inc = 2 << i; for(unsigned int k = 0; k < 
slices; k += inc) { for(unsigned int j = 0; j < len; ++j) { xdep[i]<<j<<" "<<points[k*len+j].y<<" "<<points[k*len+j].dy<<std::endl; } xdep[i]<<"&"<<std::endl; } } return; } // /** // @param fileRe the file to which we write the realpart // @param fileIm the file to which we write the imaginarypart // @param len the length of the Vector // @param slices the number of points the function has // @param delta_s the resolution of the function // */ // template <class Cont, class SignType, typename FPType> // static inline void writeVectorFunctiontoFile(Cont& container, unsigned int len, const valarray<SignType>& avsign, std::string& nameRe, std::string& nameIm, unsigned int slices, FPType delta_s) // { // std::ofstream fileRe(nameRe.c_str()); // std::ofstream fileIm(nameIm.c_str()); // //the template parameter Cont is usually a filevector // typedef typename Cont::value_type::value_type::value_type ScalarType; // std::ofstream xdep[5]; // for (unsigned int i = 0; i < 5; ++i) // { // unsigned int dt = 2 << i; // xdep[i].open((nameRe + "_" + toString(dt)).c_str()); // } // container.sync(); // std::vector<Point<FPType> > points; // points.reserve(4*slices); // unsigned int nrbins = container.size();//get nr. of bins // uint availmem = sysconf (_SC_AVPHYS_PAGES) * sysconf(_SC_PAGESIZE);//some approximation to the amount of available memory // uint memoryperpoint = sizeof(ScalarType)*nrbins; // int possibleparallelism = std::min(availmem / memoryperpoint, slices); // std::valarray<ScalarType>* temp = NULL; // bool memcritical = (possibleparallelism == 0); // std::cout<<"Possible parallelism: "<<possibleparallelism<<std::endl; // if(!memcritical) // { // try // { // temp = new std::valarray<ScalarType>[possibleparallelism]; // for(int j = 0; j < possibleparallelism; ++j) // temp[j].resize(nrbins); // } // catch(std::bad_alloc& e) // { // delete [] temp; // memcritical = true; // } // } // if(memcritical) // {//memory is critical. 
let's try it anyway and see if the kernel frees memory. // possibleparallelism = 1; // temp = new std::valarray<ScalarType>(nrbins); // } // // std::valarray<ScalarType> temp(nrbins);//allocate a valarray for one function // for (unsigned int k = 0; k < len; ++k)//for every index of the function that we stored // { // for (unsigned int j = 0; j < slices; j += possibleparallelism)//for all functionpoints // { // int upperlimit = possibleparallelism; // if(j + possibleparallelism >= slices) // upperlimit = slices - j;//determine the remaining indices // for (unsigned int l = 0; l < nrbins; ++l)//copy function points // { // auto tempdatafromfile = container(l, k); // for(int j1 = 0; j1 < upperlimit; ++j1) // temp[j1][l] = tempdatafromfile[j+j1]; // // temp[l] = container(l, k)[j]; // } // for(int j1 = 0; j1 < upperlimit; ++j1) // { // mc_analysis::errordata<ScalarType> edp(handlesign(temp[j1], avsign));//do sign - analysis // //output data // points.push_back(Point<FPType>((j+j1) * delta_s, k, real(edp.get_mean()), real(edp.get_error()))); // fileRe<<(j+j1) * delta_s<<" "<<real(edp.get_mean())<<" "<<real(edp.get_error())<<std::endl; // fileIm<<(j+j1) * delta_s<<" "<<imag(edp.get_mean())<<" "<<imag(edp.get_error())<<std::endl; // } // } // //output xmgrace separators // fileRe<<"&"<<std::endl; // fileIm<<"&"<<std::endl; // } // delete [] temp; // //now let's generate the transformed data // sort(points.begin(), points.end()); // #pragma omp parallel for schedule(dynamic) // for(size_t i = 0; i < 5; ++i) // { // unsigned int inc = 2 << i; // for(unsigned int k = 0; k < slices; k += inc) // { // for(unsigned int j = 0; j < len; ++j) // { // xdep[i]<<j<<" "<<points[k*len+j].y<<" "<<points[k*len+j].dy<<std::endl; // } // xdep[i]<<"&"<<std::endl; // } // } // return; // } /** * This gets called for every real function, e.g. The Green's function if it is real. 
@param name the file to which we write the data (also used as the stem for the
            covariance and decimated-output file names)
@param len the length of the Vector (number of stored functions / tensor indices)
@param slices the number of points the function has
@param delta_s the resolution of the function
*/
// Real-valued variant: performs the sign-corrected error analysis of a stored
// vector function and writes mean/error columns (xmgrace format, '&'-separated
// sets), the optional covariance matrix, and decimated copies at strides
// 2,4,8,16,32.
template <class Cont, typename FPType>
static inline void writeVectorFunctiontoFile(Cont& container, unsigned int len, const valarray<FPType>& avsign, std::string& name, unsigned int slices, FPType delta_s, bool covariance)
{
    //the template parameter Cont is usually a filevector
    typedef typename Cont::value_type::value_type::value_type ScalarType;
    std::ofstream file(name.c_str());
    std::ofstream covfile((name + "_Cov").c_str());
    // one extra output stream per decimation stride dt = 2,4,8,16,32
    std::ofstream xdep[5];
    for (unsigned int i = 0; i < 5; ++i)
    {
        unsigned int dt = 2 << i;
        xdep[i].open((name + "_" +toString(dt)).c_str());
    }
    container.sync();
    std::vector<Point<FPType> > points;
    points.reserve(4*slices);
    unsigned int nroffunctions = container.size();//get nr. of bins
    // one row per bin plus one trailing row; each row holds the function's
    // 'slices' values plus the bin's average sign appended as the last entry
    std::valarray<std::valarray<ScalarType> > temp(nroffunctions + 1);//allocate a valarray for one function + the sign
    for(uint j = 0; j < (nroffunctions + 1); ++j)
        temp[j].resize(slices + 1);
    for (unsigned int k = 0; k < len; ++k)//for every function that we stored
    {
        for(uint j = 0; j < nroffunctions; ++j) //get every bin
        {
            auto ret = container(j, k);
            for(uint i = 0; i < ret.size(); ++i)
                temp[j][i] = ret[i];
            temp[j][ret.size()] = avsign[j];
        }
        mc_analysis::errordata< std::valarray< ScalarType > > edp(handlesign(temp, covariance));//do sign - analysis
        for(uint l = 0; l < slices; ++l)
        {
            //output data
            points.push_back(Point<FPType>(l * delta_s, k, edp.get_mean()[l], edp.get_error()[l]));
            file<<l * delta_s<<" "<<edp.get_mean()[l]<<" "<<edp.get_error()[l]<<std::endl;
        }
        //output xmgrace separators
        file<<"&"<<std::endl;
        if(covariance)
        {
            // NOTE(review): this inner 'k' shadows the outer function index
            // 'k' — legal but confusing; the covariance written here belongs
            // to the current outer-k function.
            for(uint k = 0; k < slices; ++k)
            {
                for(uint j = 0; j < slices; ++j)
                    covfile<<edp.getCov()[k*slices + j]<<" ";
                covfile<<std::endl;
            }
        }
    }
    //now let's generate the transformed data
    // points were pushed ordered by (slice, function-index); after sorting,
    // points[k*len+j] is presumably slice k / function j — relies on
    // Point<FPType>'s ordering (TODO confirm against Point's operator<)
    sort(points.begin(), points.end());
#pragma omp parallel for
    for(size_t i = 0; i < 5; ++i)
    {
        unsigned int inc = 2 << i;
        for(unsigned int k = 0; k < slices; k += inc)
        {
            for(unsigned int j = 0; j < len; ++j)
            {
                xdep[i]<<j<<" "<<points[k*len+j].y<<" "<<points[k*len+j].dy<<std::endl;
            }
            xdep[i]<<"&"<<std::endl;
        }
    }
    return;
}

// Generic (complex-valued) analysis entry point for vector observables:
// writes separate real/imaginary files.  The "KondoCloud" observables are
// indexed by integers, everything else by floating-point abscissae.
template<class T, typename SignType>
void Error_Analysis<std::valarray<T>, SignType>::analyze(const std::string& p, Observable<std::valarray<T>, SignType>& obs, AverageSign<SignType>& avsign)
{
    std::ofstream outRe((p + obs.myname + string("_Re")).c_str());
    std::ofstream outIm((p + obs.myname + string("_Im")).c_str());
    // strip const/reference so the index-tag templates get the bare type
    typedef typename remove_const<typename remove_reference<decltype(obs.delta_s)>::type>::type mytype;
    if(obs.myname.compare(0, 10, "KondoCloud") == 0)
        writeFunctiontoFile(obs.fv, avsign.signcache, outRe, outIm, obs.functionpoints, obs.delta_s, IntIndex<mytype>());
    else
        writeFunctiontoFile(obs.fv, avsign.signcache, outRe, outIm, obs.functionpoints, obs.delta_s, FPIndex<mytype>());
    return;
}

// Fully real specialization: a single output file, no _Re/_Im split.
template<>
void Error_Analysis<std::valarray<double>, double>::analyze(const std::string& p, Observable<std::valarray<double>, double>& obs, AverageSign<double>& avsign)
{
    std::ofstream outRe((p + obs.myname).c_str());
    writeFunctiontoFile(obs.fv, avsign.signcache, outRe, obs.functionpoints, obs.delta_s);
    return;
}

// Partial specialization for observables that are vectors of vector-valued
// functions (valarray of valarray).
template<class T, typename SignType>
class Error_Analysis<std::valarray<std::valarray<T> >, SignType>
{
public:
    /**
    Run the error analysis of a vector-of-vector observable and write the
    results to files under path p.
    @param p the path to which we store
    @param obs the Observable
    @param avsign the AverageSign used for determining the value of the observable
    */
    static void analyze(const std::string& p, Observable<std::valarray<std::valarray<T> >, SignType>& obs, AverageSign<SignType>& avsign);
private:
};

// Generic (complex) vector-of-vector case: real and imaginary parts go to
// separate files.
template<class T, typename SignType>
void Error_Analysis<std::valarray<std::valarray<T> >, SignType>::analyze(const std::string& p, Observable<std::valarray<std::valarray<T> >, SignType>& obs, AverageSign<SignType>& avsign)
{
    std::string outRe(p + obs.myname + string("_Re"));//create file for outputting the real part
    std::string outIm(p + obs.myname + string("_Im"));//create file for outputting the imaginary part
    writeVectorFunctiontoFile(obs.fv, obs.tensorindices, avsign.signcache, outRe, outIm, obs.functionpoints, obs.delta_s, obs.covariance);//well, write to files...
}

// Real vector-of-vector specialization: single output file stem.
template<>
void Error_Analysis<std::valarray<std::valarray<double> >, double>::analyze(const std::string& p, Observable<std::valarray<std::valarray<double> >, double>& obs, AverageSign<double>& avsign)
{
    std::string outRe(p + obs.myname);//create file for outputting the real part
    writeVectorFunctiontoFile(obs.fv, obs.tensorindices, avsign.signcache, outRe, obs.functionpoints, obs.delta_s, obs.covariance);//well, write to files...
}
#endif
GB_reduce_to_vector.c
//------------------------------------------------------------------------------
// GB_reduce_to_vector: reduce a matrix to a vector using a binary op
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// CALLS: GB_build

// C<M> = accum (C,reduce(A)) where C is n-by-1.  Reduces a matrix A or A'
// to a vector.

#include "GB_reduce.h"
#include "GB_binop.h"
#include "GB_build.h"
#include "GB_ek_slice.h"
#include "GB_accum_mask.h"
#ifndef GBCOMPACT
#include "GB_red__include.h"
#endif

// free the slicing workspace (safe on NULL pointers)
#define GB_FREE_WORK                                                    \
{                                                                       \
    GB_FREE (Wfirst_space) ;                                            \
    GB_FREE (Wlast_space) ;                                             \
    GB_ek_slice_free (&pstart_slice, &kfirst_slice, &klast_slice) ;     \
}

// free workspace and the result matrix T (used on error paths)
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_FREE_WORK ;                                                      \
    GB_MATRIX_FREE (&T) ;                                               \
}

GrB_Info GB_reduce_to_vector        // C<M> = accum (C,reduce(A))
(
    GrB_Matrix C,                   // input/output for results, size n-by-1
    const GrB_Matrix M,             // optional M for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(C,T)
    const GrB_BinaryOp reduce,      // reduce operator for T=reduce(A)
    const GB_void *terminal,        // for early exit (NULL if none)
    const GrB_Matrix A,             // first input: matrix A
    const GrB_Descriptor desc,      // descriptor for C, M, and A
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    // C may be aliased with M and/or A

    GB_RETURN_IF_NULL_OR_FAULTY (C) ;
    GB_RETURN_IF_FAULTY (M) ;
    GB_RETURN_IF_FAULTY (accum) ;
    GB_RETURN_IF_NULL_OR_FAULTY (A) ;
    GB_RETURN_IF_FAULTY (desc) ;

    ASSERT_MATRIX_OK (C, "C input for reduce_BinaryOp", GB0) ;
    ASSERT_MATRIX_OK_OR_NULL (M, "M for reduce_BinaryOp", GB0) ;
    ASSERT_BINARYOP_OK_OR_NULL (accum, "accum for reduce_BinaryOp", GB0) ;
    ASSERT_BINARYOP_OK (reduce, "reduce for reduce_BinaryOp", GB0) ;
    ASSERT_MATRIX_OK (A, "A input for reduce_BinaryOp", GB0) ;
    ASSERT_DESCRIPTOR_OK_OR_NULL (desc, "desc for reduce_BinaryOp", GB0) ;

    // T holds the reduced vector; the W* arrays and slices are per-task
    // workspace for the parallel numeric phase, freed by GB_FREE_WORK.
    GrB_Matrix T = NULL ;
    int ntasks = 0 ;
    size_t zsize = 0 ;
    int64_t *pstart_slice = NULL, *kfirst_slice = NULL, *klast_slice = NULL ;
    GB_void *GB_RESTRICT Wfirst_space = NULL ;
    GB_void *GB_RESTRICT Wlast_space = NULL ;

    // get the descriptor
    GB_GET_DESCRIPTOR (info, desc, C_replace, Mask_comp, Mask_struct,
        A_transpose, xx1, xx2) ;

    // C and M are n-by-1 GrB_Vector objects, typecasted to GrB_Matrix
    ASSERT (GB_VECTOR_OK (C)) ;
    ASSERT (GB_IMPLIES (M != NULL, GB_VECTOR_OK (M))) ;

    // check domains and dimensions for C<M> = accum (C,T)
    GrB_Type ttype = reduce->ztype ;
    GB_OK (GB_compatible (C->type, C, M, accum, ttype, Context)) ;

    // check types of reduce
    if (reduce->xtype != reduce->ztype || reduce->ytype != reduce->ztype)
    {
        // all 3 types of z = reduce (x,y) must be the same.  reduce must also
        // be associative but there is no way to check this in general.
        return (GB_ERROR (GrB_DOMAIN_MISMATCH, (GB_LOG,
            "All domains of reduction operator must be identical;\n"
            "operator is: [%s] = %s ([%s],[%s])", reduce->ztype->name,
            reduce->name, reduce->xtype->name, reduce->ytype->name))) ;
    }

    // T = reduce (T,A) must be compatible
    if (!GB_Type_compatible (A->type, reduce->ztype))
    {
        return (GB_ERROR (GrB_DOMAIN_MISMATCH, (GB_LOG,
            "Incompatible type for reduction operator z=%s(x,y):\n"
            "input matrix A of type [%s]\n"
            "cannot be typecast to reduction operator of type [%s]",
            reduce->name, A->type->name, reduce->ztype->name))) ;
    }

    // check the dimensions
    int64_t n = GB_NROWS (C) ;
    if (A_transpose)
    {
        if (n != GB_NCOLS (A))
        {
            return (GB_ERROR (GrB_DIMENSION_MISMATCH, (GB_LOG,
                "w=reduce(A'): length of w is " GBd ";\n"
                "it must match the number of columns of A, which is " GBd ".",
                n, GB_NCOLS (A)))) ;
        }
    }
    else
    {
        if (n != GB_NROWS(A))
        {
            return (GB_ERROR (GrB_DIMENSION_MISMATCH, (GB_LOG,
                "w=reduce(A): length of w is " GBd ";\n"
                "it must match the number of rows of A, which is " GBd ".",
                n, GB_NROWS (A)))) ;
        }
    }

    // quick return if an empty mask is complemented
    GB_RETURN_IF_QUICK_MASK (C, C_replace, M, Mask_comp) ;

    //--------------------------------------------------------------------------
    // delete any lingering zombies and assemble any pending tuples
    //--------------------------------------------------------------------------

    GB_MATRIX_WAIT (M) ;
    GB_MATRIX_WAIT (A) ;

    //--------------------------------------------------------------------------
    // handle the CSR/CSC format of A
    //--------------------------------------------------------------------------

    // the result vector T is in CSC format, so a CSR input flips the
    // meaning of A_transpose
    if (!(A->is_csc))
    {
        A_transpose = !A_transpose ;
    }

    //--------------------------------------------------------------------------
    // T = reduce (A) or reduce (A')
    //--------------------------------------------------------------------------

    // T is created below so that it can be typecasted to a GrB_Vector when
    // done: non-hypersparse n-by-1 matrix in CSC format.

    // T = reduce_to_vector (A) or reduce_to_vector (A'), which is T = sum (A')
    // or sum (A), in MATLAB notation, except where where 'sum' is any
    // associative operator.

    // By default, T(i) = op (A (i,:)) is a vector whose length is the same as
    // the number of rows of A.  T(i) is the reduction of all entries in the
    // ith row of A.  If A_transpose is true, the T is computed as if A were
    // transposed first, and thus its length is equal to the number of vectors
    // of the input matrix A.  The use of A_transpose is the opposite of
    // MATLAB, since sum(A) in MATLAB sums up the columns of A, and sum(A')
    // sums up the rows of A..

    // T is an n-by-1 GrB_Matrix that represents the vector.  It is computed
    // as a GrB_Matrix so it can be passed to GB_accum_mask without
    // typecasting.

    ASSERT (n == ((A_transpose) ? A->vdim : A->vlen)) ;

    //--------------------------------------------------------------------------
    // scalar workspace
    //--------------------------------------------------------------------------

    size_t asize = A->type->size ;
    GB_Type_code acode = A->type->code ;
    const int64_t *GB_RESTRICT Ai = A->i ;
    const GB_void *GB_RESTRICT Ax = (GB_void *) A->x ;
    int64_t anvec = A->nvec ;
    int64_t anz = GB_NNZ (A) ;
    zsize = reduce->ztype->size ;
    GB_Type_code zcode = reduce->ztype->code ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (anz + anvec, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // T = reduce(A) or reduce(A')
    //--------------------------------------------------------------------------

    GxB_binary_function freduce = reduce->function ;
    GB_cast_function cast_A_to_Z = GB_cast_factory (zcode, acode) ;
    // no typecast needed when A's type matches the monoid's z type; this
    // enables the compiled (factory) workers below
    bool nocasting = (A->type == reduce->ztype) ;

    if (A_transpose)
    {

        //----------------------------------------------------------------------
        // T = reduce(A'), where T(j) = reduce (A (:,j))
        //----------------------------------------------------------------------

        // Each vector A(:,j) is reduced to the scalar T(j)

        //----------------------------------------------------------------------
        // allocate T, including T->p, T->i, and T->x.  T is not hypersparse.
        //----------------------------------------------------------------------

        // since T is a GrB_Vector, it is CSC and not hypersparse
        GB_OK (GB_create (&T, ttype, n, 1, GB_Ap_calloc, true,
            GB_FORCE_NONHYPER, GB_HYPER_DEFAULT, 1, anvec, true, Context)) ;
        ASSERT (GB_VECTOR_OK (T)) ;
        T->p [0] = 0 ;
        T->p [1] = anvec ;
        int64_t *GB_RESTRICT Ti = T->i ;
        GB_void *GB_RESTRICT Tx = (GB_void *) T->x ;
        T->nvec_nonempty = (anvec > 0) ? 1 : 0 ;
        T->magic = GB_MAGIC ;

        //----------------------------------------------------------------------
        // symbolic phase
        //----------------------------------------------------------------------

        // Construct the pattern of T.  The kth vector in A creates one entry
        // in T, but it is flagged as a zombie if it is empty.

        int64_t nzombies = 0 ;
        const int64_t *GB_RESTRICT Ah = A->h ;
        const int64_t *GB_RESTRICT Ap = A->p ;
        int nth = GB_nthreads (anvec, chunk, nthreads_max) ;
        int64_t k ;
        #pragma omp parallel for num_threads(nth) schedule(static) \
            reduction(+:nzombies)
        for (k = 0 ; k < anvec ; k++)
        {
            // if A(:,j) is empty, then the entry in T becomes a zombie
            // (Ah == NULL means A is non-hypersparse: vector k is column k)
            int64_t j = (Ah == NULL) ? k : Ah [k] ;
            int64_t jnz = Ap [k+1] - Ap [k] ;
            if (jnz == 0)
            {
                // A(:,j) is empty: T(j) is a zombie
                Ti [k] = GB_FLIP (j) ;
                nzombies++ ;
            }
            else
            {
                // A(:,j) has at least one entry; T(j) is live
                Ti [k] = j ;
            }
        }
        if (A->nvec_nonempty < 0)
        {
            A->nvec_nonempty = anvec - nzombies ;
        }
        ASSERT (A->nvec_nonempty == (anvec - nzombies)) ;
        T->nzombies = nzombies ;

        //----------------------------------------------------------------------
        // slice the entries of A for the numeric phase
        //----------------------------------------------------------------------

        // Task tid does entries pstart_slice [tid] to pstart_slice [tid+1]-1
        // and vectors kfirst_slice [tid] to klast_slice [tid].  The first and
        // last vectors may be shared with prior slices and subsequent slices.

        ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ;
        ntasks = GB_IMIN (ntasks, anz) ;
        ntasks = GB_IMAX (ntasks, 1) ;

        // per-task partial results for the first/last (possibly shared)
        // vectors of each slice
        Wfirst_space = GB_MALLOC (ntasks * zsize, GB_void) ;
        Wlast_space = GB_MALLOC (ntasks * zsize, GB_void) ;

        if (Wfirst_space == NULL || Wlast_space == NULL ||
            !GB_ek_slice (&pstart_slice, &kfirst_slice, &klast_slice, A,
            ntasks))
        {
            // out of memory
            GB_FREE_ALL ;
            return (GB_OUT_OF_MEMORY) ;
        }

        //----------------------------------------------------------------------
        // reduce to vector with built-in operators
        //----------------------------------------------------------------------

        bool done = false ;

        #ifndef GBCOMPACT

            //------------------------------------------------------------------
            // define the worker for the switch factory
            //------------------------------------------------------------------

            #define GB_red(opname,aname) GB_red_eachvec_ ## opname ## aname

            #define GB_RED_WORKER(opname,aname,atype)                       \
            {                                                               \
                info = GB_red (opname, aname) ((atype *) Tx, A,             \
                    kfirst_slice, klast_slice, pstart_slice,                \
                    Wfirst_space, Wlast_space, ntasks, nthreads) ;          \
                done = (info != GrB_NO_VALUE) ;                             \
            }                                                               \
            break ;

            if (nocasting)
            {
                // controlled by opcode and typecode.  No typecasting is done.
                GB_Opcode opcode = reduce->opcode ;
                GB_Type_code typecode = acode ;
                ASSERT (typecode <= GB_UDT_code) ;
                #include "GB_red_factory.c"
            }

        #endif

        //----------------------------------------------------------------------
        // generic worker: with typecasting
        //----------------------------------------------------------------------

        if (!done)
        {
            GB_BURBLE_MATRIX (A, "generic ") ;
            #define GB_ATYPE GB_void
            #define GB_CTYPE GB_void

            // ztype s ;
            #define GB_SCALAR(s)                                            \
                GB_void s [GB_VLA(zsize)]

            // ztype s = (ztype) Ax [p], with typecast
            #define GB_CAST_ARRAY_TO_SCALAR(s,Ax,p)                         \
                cast_A_to_Z (s, Ax +((p)*asize), zsize) ;                   \

            // s += (ztype) Ax [p], with typecast
            #define GB_ADD_CAST_ARRAY_TO_SCALAR(s, Ax, p)                   \
                GB_void awork [GB_VLA(zsize)] ;                             \
                cast_A_to_Z (awork, Ax +((p)*asize), zsize) ;               \
                freduce (s, s, awork) ;

            // W [k] = s, no typecast
            #define GB_COPY_SCALAR_TO_ARRAY(W,k,s)                          \
                memcpy (W +((k)*zsize), s, zsize) ;

            // W [k] = S [i], no typecast
            #define GB_COPY_ARRAY_TO_ARRAY(W,k,S,i)                         \
                memcpy (W +((k)*zsize), S +((i)*zsize), zsize) ;

            // W [k] += S [i], no typecast
            #define GB_ADD_ARRAY_TO_ARRAY(W,k,S,i)                          \
                freduce (W +((k)*zsize), W +((k)*zsize), S +((i)*zsize)) ;

            // W [k] += s, no typecast
            #define GB_ADD_SCALAR_TO_ARRAY(W,k,s)                           \
                freduce (W +((k)*zsize), W +((k)*zsize), s) ;

            // break if terminal value reached
            #define GB_BREAK_IF_TERMINAL(t)                                 \
                if (terminal != NULL)                                       \
                {                                                           \
                    if (memcmp (t, terminal, zsize) == 0) break ;           \
                }

            #include "GB_reduce_each_vector.c"
        }

        //----------------------------------------------------------------------
        // wrapup: delete any zombies
        //----------------------------------------------------------------------

        ASSERT_MATRIX_OK (T, "T before wait", GB_FLIP (GB0)) ;
        if (nzombies > 0)
        {
            ASSERT (GB_VECTOR_OK (T)) ;
            ASSERT (!GB_PENDING (T)) ;
            ASSERT (GB_ZOMBIES (T)) ;
            GB_OK (GB_Matrix_wait (T, Context)) ;
        }

        ASSERT_MATRIX_OK (T, "T output = reduce_each_vector (A)", GB0) ;

    }
    else
    {

        //----------------------------------------------------------------------
        // T = reduce(A), where T(i) = reduce (A (i,:))
        //----------------------------------------------------------------------

        //----------------------------------------------------------------------
        // select the method
        //----------------------------------------------------------------------

        // When A_transpose is false (after flipping it to account for the
        // CSR/CSC format), n is A->vlen, the vector length of A.  This is
        // the number of rows of a CSC matrix, or the # of columns of a CSR
        // matrix.  The matrix A itself requires O(vdim+anz) memory if
        // non-hypersparse and O(anz) if hypersparse.  This does not depend on
        // A->vlen.  So if the vector length is really huge (when anz << n),
        // the bucket method would fail.  Thus, the qsort method, below, is
        // used when A is very sparse.

        if (GB_CHOOSE_QSORT_INSTEAD_OF_BUCKET (anz, n))
        {

            //------------------------------------------------------------------
            // qsort method
            //------------------------------------------------------------------

            // memory usage is O(anz) and time is O(anz*log(anz)).  This is
            // more efficient than the bucket method, below, when A is very
            // hypersparse.  The time and memory complexity does not depend
            // on n.

            // since T is a GrB_Vector, it is not hypersparse
            GB_OK (GB_new (&T, ttype, n, 1, GB_Ap_null, true,
                GB_FORCE_NONHYPER, GB_HYPER_DEFAULT, 1, Context)) ;

            // GB_build treats Ai and Ax as read-only; they must not be modified
            GB_OK (GB_build
            (
                T,                  // construct result in the T vector
                (GrB_Index *) Ai,   // indices inside the vector
                NULL,               // vector indices (none)
                Ax,                 // values, of size anz
                anz,                // number of tuples
                reduce,             // reduction operator
                acode,              // type code of the Ax array
                false,              // the input is a vector
                false,              // indices do not need to be checked
                Context
            )) ;

            ASSERT (T->nvec_nonempty == GB_nvec_nonempty (T, NULL)) ;

        }
        else
        {

            //------------------------------------------------------------------
            // bucket method
            //------------------------------------------------------------------

            // Determine number of threads to use for constructing the buckets.
            // Each thread requires O(n) workspace, so this method does not
            // scale well when there are many threads compared to anz.  Total
            // workspace is O(n*ntasks), so limit the # of threads used so that
            // at most anz workspace is used.  Each thread takes a single task.

            ntasks = (n > 0) ? (anz / n) : 1 ;
            ntasks = GB_IMIN (ntasks, nthreads) ;
            ntasks = GB_IMAX (ntasks, 1) ;
            int nth = ntasks ;      // one thread per task

            //------------------------------------------------------------------
            // slice the entries for each thread
            //------------------------------------------------------------------

            // Thread tid does entries pstart_slice [tid] to
            // pstart_slice [tid+1]-1.  No need to compute kfirst or klast.

            pstart_slice = GB_MALLOC (ntasks+1, int64_t) ;
            if (pstart_slice == NULL)
            {
                // out of memory
                GB_FREE_ALL ;
                return (GB_OUT_OF_MEMORY) ;
            }
            GB_eslice (pstart_slice, anz, ntasks) ;

            //------------------------------------------------------------------
            // T(i) = reduce (A (i,:)), built-in operators
            //------------------------------------------------------------------

            bool done = false ;

            #ifndef GBCOMPACT

                //--------------------------------------------------------------
                // define the worker for the switch factory
                //--------------------------------------------------------------

                // Early exit cannot be exploited; ignore the terminal value.

                // redefine the factory worker names for the each-index kernels
                #undef  GB_red
                #define GB_red(opname,aname)                                \
                    GB_red_eachindex_ ## opname ## aname

                #undef  GB_RED_WORKER
                #define GB_RED_WORKER(opname,aname,atype)                   \
                {                                                           \
                    info = GB_red (opname, aname) (&T, ttype, A, pstart_slice, \
                        ntasks, nthreads, Context) ;                        \
                    done = (info != GrB_NO_VALUE) ;                         \
                }                                                           \
                break ;

                //--------------------------------------------------------------
                // launch the switch factory
                //--------------------------------------------------------------

                if (nocasting)
                {
                    // controlled by opcode and typecode.  No typecasting
                    GB_Opcode opcode = reduce->opcode ;
                    GB_Type_code typecode = acode ;
                    ASSERT (typecode <= GB_UDT_code) ;
                    #include "GB_red_factory.c"
                    if (! (info == GrB_SUCCESS || info == GrB_NO_VALUE))
                    {
                        // out of memory
                        GB_FREE_ALL ;
                        return (info) ;
                    }
                }

            #endif

            //------------------------------------------------------------------
            // T(i) = reduce (A (i,:)), generic worker
            //------------------------------------------------------------------

            if (!done)
            {
                // if this fails, the template frees all workspace with the
                // GB_FREE_ALL macro, defined above.
                GB_BURBLE_MATRIX (A, "generic ") ;
                #include "GB_reduce_each_index.c"
            }
        }

        ASSERT_MATRIX_OK (T, "T output for T = reduce_each_index (A)", GB0) ;
    }

    //--------------------------------------------------------------------------
    // C<M> = accum (C,T): accumulate the results into C via the mask
    //--------------------------------------------------------------------------

    GB_FREE_WORK ;
    return (GB_accum_mask (C, M, NULL, accum, &T, C_replace, Mask_comp,
        Mask_struct, Context)) ;
}
dgemm_2_save.c
#define max(a,b) (((a) < (b))? (b) : (a)) #define min(a,b) (((a) < (b))? (a) : (b)) #define _TH_1 2 #include <omp.h> void dgemm_test(const int M,const int N,const int K,const double alpha,const double* A,const int lda,const double* B,const int ldb,const double beta,double* C,const int ldc) { int i; int j; int l; int j_bk_1; int j_bk_2; int i_bk_3; int l_bk_4; double _C_cp_0_0; double _C_cp_1_0; double _C_cp_2_0; double _C_cp_3_0; double _A_cp_0_0; double _B_cp_0_0; double _B_cp_1_0; double _B_cp_2_0; double _B_cp_3_0; omp_set_num_threads(_TH_1); #pragma omp parallel { /*@;BEGIN(nest1_group3=Nest)@*/#pragma omp for private(l,i,j,j_bk_1,j_bk_2,i_bk_3,l_bk_4,_C_cp_0_0,_C_cp_1_0,_C_cp_2_0,_C_cp_3_0,_A_cp_0_0,_B_cp_0_0,_B_cp_1_0,_B_cp_2_0,_B_cp_3_0) for (j_bk_1=0; j_bk_1<N; j_bk_1+=256) { /*@;BEGIN(nest1_group2=Nest)@*/for (j_bk_2=0; j_bk_2<-31+min(256,N-j_bk_1); j_bk_2+=32) { for (i_bk_3=0; i_bk_3<-31+M; i_bk_3+=32) { if ((l_bk_4=0)<-31+K) { for (j=0; j<32; j+=4) { for (i=0; i<32; i+=1) { _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))]; _C_cp_1_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))]; _C_cp_2_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))]; _C_cp_3_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))]; l = 0; { _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))]; _B_cp_1_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+ldb))))]; _B_cp_2_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+2*ldb))))]; _B_cp_3_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+3*ldb))))]; _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))]; _C_cp_0_0 = beta*_C_cp_0_0; _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0; _C_cp_1_0 = beta*_C_cp_1_0; _C_cp_1_0 = _C_cp_1_0+alpha*_A_cp_0_0*_B_cp_1_0; _C_cp_2_0 = beta*_C_cp_2_0; _C_cp_2_0 = _C_cp_2_0+alpha*_A_cp_0_0*_B_cp_2_0; _C_cp_3_0 = beta*_C_cp_3_0; _C_cp_3_0 = _C_cp_3_0+alpha*_A_cp_0_0*_B_cp_3_0; } for (l=1; l<32; l+=1) { _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))]; _B_cp_1_0 = 
B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+ldb))))]; _B_cp_2_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+2*ldb))))]; _B_cp_3_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+3*ldb))))]; _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))]; _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0; _C_cp_1_0 = _C_cp_1_0+alpha*_A_cp_0_0*_B_cp_1_0; _C_cp_2_0 = _C_cp_2_0+alpha*_A_cp_0_0*_B_cp_2_0; _C_cp_3_0 = _C_cp_3_0+alpha*_A_cp_0_0*_B_cp_3_0; } C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0; C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))] = _C_cp_1_0; C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))] = _C_cp_2_0; C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))] = _C_cp_3_0; } } } for (l_bk_4=32; l_bk_4<-31+K; l_bk_4+=32) { /*@;BEGIN(nest1=Nest)@*/for (j=0; j<32; j+=4) { /*@;BEGIN(nest3=Nest)@*/for (i=0; i<32; i+=1) { _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))]; _C_cp_1_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))]; _C_cp_2_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))]; _C_cp_3_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))]; /*@;BEGIN(nest2=Nest)@*/for (l=0; l<32; l+=1) { _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))]; _B_cp_1_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+ldb))))]; _B_cp_2_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+2*ldb))))]; _B_cp_3_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+3*ldb))))]; _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))]; _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0; _C_cp_1_0 = _C_cp_1_0+alpha*_A_cp_0_0*_B_cp_1_0; _C_cp_2_0 = _C_cp_2_0+alpha*_A_cp_0_0*_B_cp_2_0; _C_cp_3_0 = _C_cp_3_0+alpha*_A_cp_0_0*_B_cp_3_0; } C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0; C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))] = _C_cp_1_0; C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))] = _C_cp_2_0; C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))] = _C_cp_3_0; } } } if (l_bk_4<K) { for (j=0; j<32; j+=4) { for (i=0; i<32; i+=1) { _C_cp_0_0 = 
C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))]; _C_cp_1_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))]; _C_cp_2_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))]; _C_cp_3_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))]; for (l=0; l<K-l_bk_4; l+=1) { _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))]; _B_cp_1_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+ldb))))]; _B_cp_2_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+2*ldb))))]; _B_cp_3_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+3*ldb))))]; _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))]; _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0; _C_cp_1_0 = _C_cp_1_0+alpha*_A_cp_0_0*_B_cp_1_0; _C_cp_2_0 = _C_cp_2_0+alpha*_A_cp_0_0*_B_cp_2_0; _C_cp_3_0 = _C_cp_3_0+alpha*_A_cp_0_0*_B_cp_3_0; } C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0; C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))] = _C_cp_1_0; C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))] = _C_cp_2_0; C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))] = _C_cp_3_0; } } } } if (i_bk_3<M) { if ((l_bk_4=0)<-31+K) { for (j=0; j<32; j+=4) { for (i=0; i<M-i_bk_3; i+=1) { _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))]; _C_cp_1_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))]; _C_cp_2_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))]; _C_cp_3_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))]; l = 0; { _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))]; _B_cp_1_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+ldb))))]; _B_cp_2_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+2*ldb))))]; _B_cp_3_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+3*ldb))))]; _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))]; _C_cp_0_0 = beta*_C_cp_0_0; _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0; _C_cp_1_0 = beta*_C_cp_1_0; _C_cp_1_0 = _C_cp_1_0+alpha*_A_cp_0_0*_B_cp_1_0; _C_cp_2_0 = beta*_C_cp_2_0; _C_cp_2_0 = _C_cp_2_0+alpha*_A_cp_0_0*_B_cp_2_0; _C_cp_3_0 = beta*_C_cp_3_0; _C_cp_3_0 = 
_C_cp_3_0+alpha*_A_cp_0_0*_B_cp_3_0; } for (l=1; l<32; l+=1) { _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))]; _B_cp_1_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+ldb))))]; _B_cp_2_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+2*ldb))))]; _B_cp_3_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+3*ldb))))]; _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))]; _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0; _C_cp_1_0 = _C_cp_1_0+alpha*_A_cp_0_0*_B_cp_1_0; _C_cp_2_0 = _C_cp_2_0+alpha*_A_cp_0_0*_B_cp_2_0; _C_cp_3_0 = _C_cp_3_0+alpha*_A_cp_0_0*_B_cp_3_0; } C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0; C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))] = _C_cp_1_0; C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))] = _C_cp_2_0; C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))] = _C_cp_3_0; } } } for (l_bk_4=32; l_bk_4<-31+K; l_bk_4+=32) { for (j=0; j<32; j+=4) { for (i=0; i<M-i_bk_3; i+=1) { _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))]; _C_cp_1_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))]; _C_cp_2_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))]; _C_cp_3_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))]; for (l=0; l<32; l+=1) { _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))]; _B_cp_1_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+ldb))))]; _B_cp_2_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+2*ldb))))]; _B_cp_3_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+3*ldb))))]; _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))]; _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0; _C_cp_1_0 = _C_cp_1_0+alpha*_A_cp_0_0*_B_cp_1_0; _C_cp_2_0 = _C_cp_2_0+alpha*_A_cp_0_0*_B_cp_2_0; _C_cp_3_0 = _C_cp_3_0+alpha*_A_cp_0_0*_B_cp_3_0; } C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0; C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))] = _C_cp_1_0; C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))] = _C_cp_2_0; C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))] = _C_cp_3_0; } } } if (l_bk_4<K) { for 
(j=0; j<32; j+=4) { for (i=0; i<M-i_bk_3; i+=1) { _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))]; _C_cp_1_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))]; _C_cp_2_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))]; _C_cp_3_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))]; for (l=0; l<K-l_bk_4; l+=1) { _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))]; _B_cp_1_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+ldb))))]; _B_cp_2_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+2*ldb))))]; _B_cp_3_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+3*ldb))))]; _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))]; _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0; _C_cp_1_0 = _C_cp_1_0+alpha*_A_cp_0_0*_B_cp_1_0; _C_cp_2_0 = _C_cp_2_0+alpha*_A_cp_0_0*_B_cp_2_0; _C_cp_3_0 = _C_cp_3_0+alpha*_A_cp_0_0*_B_cp_3_0; } C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0; C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))] = _C_cp_1_0; C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))] = _C_cp_2_0; C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))] = _C_cp_3_0; } } } } } if (j_bk_2<min(256,N-j_bk_1)) { for (i_bk_3=0; i_bk_3<-31+M; i_bk_3+=32) { if ((l_bk_4=0)<-31+K) { for (j=0; j<min(256-j_bk_2,-j_bk_2+(N-j_bk_1)); j+=1) { for (i=0; i<32; i+=1) { _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))]; l = 0; { _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))]; _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))]; _C_cp_0_0 = beta*_C_cp_0_0; _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0; } for (l=1; l<32; l+=1) { _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))]; _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))]; _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0; } C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0; } } } for (l_bk_4=32; l_bk_4<-31+K; l_bk_4+=32) { for (j=0; j<min(256-j_bk_2,-j_bk_2+(N-j_bk_1)); j+=1) { for (i=0; i<32; i+=1) { _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))]; for (l=0; l<32; l+=1) { 
_B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))]; _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))]; _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0; } C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0; } } } if (l_bk_4<K) { for (j=0; j<min(256-j_bk_2,-j_bk_2+(N-j_bk_1)); j+=1) { for (i=0; i<32; i+=1) { _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))]; for (l=0; l<K-l_bk_4; l+=1) { _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))]; _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))]; _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0; } C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0; } } } } if (i_bk_3<M) { if ((l_bk_4=0)<-31+K) { for (j=0; j<min(256-j_bk_2,-j_bk_2+(N-j_bk_1)); j+=1) { for (i=0; i<M-i_bk_3; i+=1) { _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))]; l = 0; { _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))]; _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))]; _C_cp_0_0 = beta*_C_cp_0_0; _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0; } for (l=1; l<32; l+=1) { _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))]; _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))]; _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0; } C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0; } } } for (l_bk_4=32; l_bk_4<-31+K; l_bk_4+=32) { for (j=0; j<min(256-j_bk_2,-j_bk_2+(N-j_bk_1)); j+=1) { for (i=0; i<M-i_bk_3; i+=1) { _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))]; for (l=0; l<32; l+=1) { _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))]; _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))]; _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0; } C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0; } } } if (l_bk_4<K) { for (j=0; j<min(256-j_bk_2,-j_bk_2+(N-j_bk_1)); j+=1) { for (i=0; i<M-i_bk_3; i+=1) { _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))]; for (l=0; l<K-l_bk_4; l+=1) { _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))]; _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))]; _C_cp_0_0 = 
_C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0; } C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0; } } } } } } } }
convolution_5x5.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 5x5 stride-1 convolution, scalar reference path for x86 (no intrinsics despite
// the _sse suffix; the suffix only marks the x86 backend).
//
// bottom_blob : input feature maps, `inch` channels, row stride `w` floats.
// top_blob    : output feature maps, `outch` channels of `outh` x `outw`,
//               pre-sized by the caller.
// _kernel     : weights laid out as [outch][inch][25] floats (see the
//               `p * inch * 25 + q * 25` indexing below).
// _bias       : optional per-output-channel bias; may be empty (pointer NULL).
//
// NOTE(review): the input row pointers advance by `4 + w` per two-row step and
// each in-row step reads r?[0..4], so this assumes bottom_blob was padded such
// that outw == w - 4 (the usual ncnn "make_padding" convention) — confirm at
// the call site.
static void conv5x5s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // One output channel per OpenMP task; channels are fully independent.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        // Seed the whole output channel with its bias (0 when no bias given),
        // then accumulate the contribution of every input channel into it.
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);

        for (int q = 0; q < inch; q++)
        {
            // Two output rows are produced per outer iteration, so keep two
            // running output pointers one row apart.
            float* outptr = out;
            float* outptr2 = outptr + outw;

            const float* img0 = bottom_blob.channel(q);

            // 25 weights for (output channel p, input channel q).
            const float* kernel0 = kernel + p * inch * 25 + q * 25;

            // Six consecutive input rows: rows 0-4 feed output row i,
            // rows 1-5 feed output row i+1 (the kernel rows shift down by one).
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w * 2;
            const float* r3 = img0 + w * 3;
            const float* r4 = img0 + w * 4;
            const float* r5 = img0 + w * 5;

            // One pointer per kernel row.
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 5;
            const float* k2 = kernel0 + 10;
            const float* k3 = kernel0 + 15;
            const float* k4 = kernel0 + 20;

            int i = 0;

            // Main loop: process output rows in pairs so the five shared input
            // rows (r1..r4) are loaded once for two accumulations.
            for (; i + 1 < outh; i += 2)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    float sum = 0;
                    float sum2 = 0;

                    // Output row i: kernel row t applied to input row t.
                    sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4];
                    sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4];
                    sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4];
                    sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4];
                    sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4];

                    // Output row i+1: same kernel, input rows shifted down one.
                    sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r1[3] * k0[3]; sum2 += r1[4] * k0[4];
                    sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r2[3] * k1[3]; sum2 += r2[4] * k1[4];
                    sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; sum2 += r3[3] * k2[3]; sum2 += r3[4] * k2[4];
                    sum2 += r4[0] * k3[0]; sum2 += r4[1] * k3[1]; sum2 += r4[2] * k3[2]; sum2 += r4[3] * k3[3]; sum2 += r4[4] * k3[4];
                    sum2 += r5[0] * k4[0]; sum2 += r5[1] * k4[1]; sum2 += r5[2] * k4[2]; sum2 += r5[3] * k4[3]; sum2 += r5[4] * k4[4];

                    // Accumulate (not assign): output already holds the bias
                    // plus earlier input channels' contributions.
                    *outptr += sum;
                    *outptr2 += sum2;

                    r0++; r1++; r2++; r3++; r4++; r5++;
                    outptr++;
                    outptr2++;
                }

                // Skip the 4-column right margin, then hop one extra row since
                // two output rows were just produced.
                r0 += 4 + w; r1 += 4 + w; r2 += 4 + w; r3 += 4 + w; r4 += 4 + w; r5 += 4 + w;

                outptr += outw;
                outptr2 += outw;
            }

            // Tail: at most one leftover output row when outh is odd.
            for (; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4];
                    sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4];
                    sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4];
                    sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4];
                    sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4];

                    *outptr += sum;

                    r0++; r1++; r2++; r3++; r4++;
                    outptr++;
                }

                // Single-row step: only the 4-column margin to skip.
                r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4;
            }
        }
    }
}

// 5x5 stride-2 convolution: delegated to the generic im2col + sgemm path
// rather than a hand-written direct kernel.
static void conv5x5s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int kernel_w = 5;
    int kernel_h = 5;
    int stride_w = 2;
    int stride_h = 2;

    conv_im2col_sgemm_sse(bottom_blob, top_blob, _kernel, _bias, kernel_w, kernel_h, stride_w, stride_h, opt);
}
distribute_parallel_for_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute parallel for simd'}} #pragma omp distribute parallel for simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute parallel for simd'}} #pragma omp distribute parallel for simd foo void test_no_clause() { int i; #pragma omp distribute parallel for simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp distribute parallel for simd' must be a for loop}} #pragma omp distribute parallel for simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} #pragma omp distribute parallel for simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} #pragma omp distribute parallel for simd; for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} #pragma omp distribute parallel for simd firstprivate(x); for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel 
for simd' are ignored}} #pragma omp distribute parallel for simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} #pragma omp distribute parallel for simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_safelen() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd safelen for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd safelen() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd safelen 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4 for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute 
parallel for simd safelen(4, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd safelen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd safelen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4, 8) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute parallel for simd safelen(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute parallel for simd safelen(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd safelen(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd safelen(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a 
strictly positive integer value}} #pragma omp distribute parallel for simd safelen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_simdlen() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd simdlen for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd simdlen() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd simdlen 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4 for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4, ) for (i = 0; 
i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd simdlen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd simdlen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4, 8) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute parallel for simd simdlen(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute parallel for simd simdlen(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd simdlen(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd simdlen(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd simdlen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_safelen_simdlen() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{the value of 'simdlen' 
parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp distribute parallel for simd simdlen(6) safelen(5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp distribute parallel for simd safelen(5) simdlen(6) for (i = 0; i < 16; ++i) ; } void test_collapse() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd collapse for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd collapse(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd collapse 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4 for (i = 0; i < 16; ++i) ; // 
expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; 
++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute parallel for simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute parallel for simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd collapse(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd collapse(2) for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp distribute parallel for simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp 
distribute parallel for simd linear( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd linear(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd linear(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd linear() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd linear(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd linear(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute parallel for simd linear(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute parallel for simd linear(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute parallel for simd linear(x, y, z) for (i = 0; i < 16; ++i) ; } void test_aligned() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd aligned( for (i = 0; i < 16; ++i) ; 
// expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd aligned(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd aligned(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd aligned() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd aligned(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd aligned(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute parallel for simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute parallel for simd aligned(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute parallel for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd aligned(z) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected 
expression}} #pragma omp distribute parallel for simd aligned(x :) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute parallel for simd aligned(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute parallel for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} #pragma omp distribute parallel for simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute parallel for simd aligned(x, y, z) aligned(y, z) for (i = 0; i 
< 16; ++i) ; } void test_private() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd private( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd private(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd private() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd private(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_lastprivate() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} 
#pragma omp distribute parallel for simd lastprivate(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd lastprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_firstprivate() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd firstprivate( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd firstprivate(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd firstprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd firstprivate() for (i = 0; i < 16; ++i) ; #pragma omp 
target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd firstprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd firstprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; // expected-error@+3 {{lastprivate variable cannot be firstprivate}} expected-note@+3 {{defined as lastprivate}} #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x) firstprivate(x) for (i = 0; i < 16; ++i) ; // expected-error@+3 2 {{lastprivate variable cannot be firstprivate}} expected-note@+3 2 {{defined as lastprivate}} #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x, y) firstprivate(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 3 {{lastprivate variable cannot be firstprivate}} expected-note@+3 3 {{defined as lastprivate}} #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x, y, z) firstprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; #pragma omp target #pragma omp teams // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp distribute parallel for simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp target #pragma omp teams // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp distribute parallel for simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } }
parallel-nosuppression.c
/* * parallel-nosuppression.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run-nosuppression | FileCheck %s // REQUIRES: tsan #include <omp.h> #include <stdio.h> int main(int argc, char *argv[]) { int var = 0; #pragma omp parallel num_threads(2) shared(var) { if (omp_get_thread_num() == 1) { var++; } } // implicit barrier var++; fprintf(stderr, "DONE\n"); int error = (var != 2); return error; } // CHECK: Warning: please export TSAN_OPTIONS // CHECK: DONE
par_relax.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision: 2.22 $ ***********************************************************************EHEADER*/ /****************************************************************************** * * Relaxation scheme * *****************************************************************************/ #include "_hypre_parcsr_ls.h" #include "Common.h" #ifdef HYPRE_USING_ESSL #include <essl.h> #else HYPRE_Int hypre_F90_NAME_LAPACK(dgetrf, DGETRF) (HYPRE_Int *, HYPRE_Int *, double *, HYPRE_Int *, HYPRE_Int *, HYPRE_Int *); HYPRE_Int hypre_F90_NAME_LAPACK(dgetrs, DGETRS) (char *, HYPRE_Int *, HYPRE_Int *, double *, HYPRE_Int *, HYPRE_Int *, double *b, HYPRE_Int*, HYPRE_Int *); #endif /*-------------------------------------------------------------------------- * hypre_BoomerAMGRelax *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGRelax( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_type, HYPRE_Int relax_points, double relax_weight, double omega, double *l1_norms, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); double *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); double *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int 
*A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; HYPRE_Int n_global= hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int first_index = hypre_ParVectorFirstIndex(u); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); double *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); double *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); double *Vtemp_data = hypre_VectorData(Vtemp_local); double *Vext_data = NULL; double *v_buf_data; double *tmp_data; hypre_Vector *Ztemp_local; double *Ztemp_data; hypre_CSRMatrix *A_CSR; HYPRE_Int *A_CSR_i; HYPRE_Int *A_CSR_j; double *A_CSR_data; hypre_Vector *f_vector; double *f_vector_data; HYPRE_Int i, j, jr; HYPRE_Int ii, jj; HYPRE_Int ns, ne, size, rest; HYPRE_Int column; HYPRE_Int relax_error = 0; HYPRE_Int num_sends; HYPRE_Int num_recvs; HYPRE_Int index, start; HYPRE_Int num_procs, num_threads, my_id, ip, p; HYPRE_Int vec_start, vec_len; hypre_MPI_Status *status; hypre_MPI_Request *requests; double *A_mat; double *b_vec; double zero = 0.0; double res, res0, res2; double one_minus_weight; double one_minus_omega; double prod; one_minus_weight = 1.0 - relax_weight; one_minus_omega = 1.0 - omega; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); /*----------------------------------------------------------------------- * Switch statement to direct control based on relax_type: * relax_type = 0 -> Jacobi or CF-Jacobi * relax_type = 1 -> Gauss-Seidel <--- very slow, sequential * relax_type = 2 -> Gauss_Seidel: interior points in parallel , * boundary sequential * relax_type = 3 -> hybrid: SOR-J mix off-processor, SOR on-processor * with outer relaxation parameters (forward solve) * relax_type = 4 -> hybrid: 
SOR-J mix off-processor, SOR on-processor * with outer relaxation parameters (backward solve) * relax_type = 5 -> hybrid: GS-J mix off-processor, chaotic GS on-node * relax_type = 6 -> hybrid: SSOR-J mix off-processor, SSOR on-processor * with outer relaxation parameters * relax_type = 7 -> Jacobi (uses Matvec), only needed in CGNR * relax_type = 9 -> Direct Solve * relax_type = 99-> Direct solve: use gaussian elimination & BLAS (with pivoting) *-----------------------------------------------------------------------*/ switch (relax_type) { case 0: /* Weighted Jacobi */ { if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(double, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); Vext_data = hypre_CTAlloc(double,num_cols_offd); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. 
*-----------------------------------------------------------------*/ if (relax_points == 0) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= one_minus_weight; u_data[i] += relax_weight * res / A_diag_data[A_diag_i[i]]; } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= one_minus_weight; u_data[i] += relax_weight * res / A_diag_data[A_diag_i[i]]; } } } if (num_procs > 1) { hypre_TFree(Vext_data); hypre_TFree(v_buf_data); } } break; case 5: /* Hybrid: Jacobi off-processor, chaotic Gauss-Seidel on-processor */ { if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(double, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); Vext_data = hypre_CTAlloc(double,num_cols_offd); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ if (relax_points == 0) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } if (num_procs > 1) { hypre_TFree(Vext_data); hypre_TFree(v_buf_data); } } break; case 3: /* Hybrid: Jacobi off-processor, Gauss-Seidel on-processor (forward loop) */ { if (num_threads > 1) { Ztemp_local = hypre_ParVectorLocalVector(Ztemp); Ztemp_data = hypre_VectorData(Ztemp_local); } if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(double, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); Vext_data = hypre_CTAlloc(double,num_cols_offd); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = 
hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ if (relax_weight == 1 && omega == 1) { if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) res -= A_diag_data[jj] * u_data[ii]; else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) res -= A_diag_data[jj] * u_data[ii]; else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } prod = (1.0-relax_weight*omega); if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax 
point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } } if (num_procs > 1) { hypre_TFree(Vext_data); hypre_TFree(v_buf_data); } } break; case 1: /* Gauss-Seidel VERY SLOW */ { if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); v_buf_data = hypre_CTAlloc(double, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); Vext_data = hypre_CTAlloc(double,num_cols_offd); status = hypre_CTAlloc(hypre_MPI_Status,num_recvs+num_sends); requests= hypre_CTAlloc(hypre_MPI_Request, num_recvs+num_sends); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ /* for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } */ } /*----------------------------------------------------------------- * Relax all points. 
*-----------------------------------------------------------------*/ for (p = 0; p < num_procs; p++) { jr = 0; if (p != my_id) { for (i = 0; i < num_sends; i++) { ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (ip == p) { vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1)-vec_start; for (j=vec_start; j < vec_start+vec_len; j++) v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, hypre_MPI_DOUBLE, ip, 0, comm, &requests[jr++]); } } hypre_MPI_Waitall(jr,requests,status); hypre_MPI_Barrier(comm); } else { if (num_procs > 1) { for (i = 0; i < num_recvs; i++) { ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i); vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i); vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i+1)-vec_start; hypre_MPI_Irecv(&Vext_data[vec_start], vec_len, hypre_MPI_DOUBLE, ip, 0, comm, &requests[jr++]); } hypre_MPI_Waitall(jr,requests,status); } if (relax_points == 0) { for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } if (num_procs > 1) hypre_MPI_Barrier(comm); } } if (num_procs > 1) { hypre_TFree(Vext_data); hypre_TFree(v_buf_data); hypre_TFree(status); hypre_TFree(requests); } } break; case 2: /* Gauss-Seidel: relax interior points in parallel, boundary sequentially */ { if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); v_buf_data = hypre_CTAlloc(double, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); Vext_data = hypre_CTAlloc(double,num_cols_offd); status = hypre_CTAlloc(hypre_MPI_Status,num_recvs+num_sends); requests= hypre_CTAlloc(hypre_MPI_Request, num_recvs+num_sends); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ /* for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } */ /*----------------------------------------------------------------- * Relax interior points first *-----------------------------------------------------------------*/ if (relax_points == 0) { for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ((A_offd_i[i+1]-A_offd_i[i]) == zero && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } else { for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && (A_offd_i[i+1]-A_offd_i[i]) == zero && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } for (p = 0; p < num_procs; p++) { jr = 0; if (p != my_id) { for (i = 0; i < num_sends; i++) { ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (ip == p) { vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1)-vec_start; for (j=vec_start; j < vec_start+vec_len; j++) v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, hypre_MPI_DOUBLE, ip, 0, comm, &requests[jr++]); } } hypre_MPI_Waitall(jr,requests,status); hypre_MPI_Barrier(comm); } else { if (num_procs > 1) { for (i = 0; i < num_recvs; i++) { ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i); vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i); vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i+1)-vec_start; hypre_MPI_Irecv(&Vext_data[vec_start], vec_len, hypre_MPI_DOUBLE, ip, 0, comm, &requests[jr++]); } hypre_MPI_Waitall(jr,requests,status); } if (relax_points == 0) { for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ((A_offd_i[i+1]-A_offd_i[i]) != zero && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && (A_offd_i[i+1]-A_offd_i[i]) != zero && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } if (num_procs > 1) hypre_MPI_Barrier(comm); } } if (num_procs > 1) { hypre_TFree(Vext_data); hypre_TFree(v_buf_data); hypre_TFree(status); hypre_TFree(requests); } } break; case 4: /* Hybrid: Jacobi off-processor, Gauss-Seidel/SOR on-processor (backward loop) */ { if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(double, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); Vext_data = hypre_CTAlloc(double,num_cols_offd); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < 
hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ if (relax_weight == 1 && omega == 1) { if (relax_points == 0) { if (num_threads > 1) { tmp_data = hypre_CTAlloc(double,n); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) res -= A_diag_data[jj] * u_data[ii]; else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } hypre_TFree(tmp_data); } else { for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = hypre_CTAlloc(double,n); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) res -= A_diag_data[jj] * u_data[ii]; else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } hypre_TFree(tmp_data); } else { for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } prod = (1.0-relax_weight*omega); if (relax_points == 0) { if (num_threads > 1) { tmp_data = hypre_CTAlloc(double,n); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ne-1; i > ns-1; i--) /* interior points first */ { 
/*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } hypre_TFree(tmp_data); } else { for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = hypre_CTAlloc(double,n); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } hypre_TFree(tmp_data); } else { for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } } if (num_procs > 1) { hypre_TFree(Vext_data); hypre_TFree(v_buf_data); } } break; case 6: /* Hybrid: Jacobi off-processor, Symm. Gauss-Seidel/ SSOR on-processor with outer relaxation parameter */ { if (num_threads > 1) { Ztemp_local = hypre_ParVectorLocalVector(Ztemp); Ztemp_data = hypre_VectorData(Ztemp_local); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(double, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); Vext_data = hypre_CTAlloc(double,num_cols_offd); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. 
*-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ if (relax_weight == 1 && omega == 1) { if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } prod = (1.0-relax_weight*omega); if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res = f_data[i]; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res = f_data[i]; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res2 += A_diag_data[jj] * Vtemp_data[ii]; res0 -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res2 += A_diag_data[jj] * Vtemp_data[ii]; res0 -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } } if (num_procs > 1) { hypre_TFree(Vext_data); hypre_TFree(v_buf_data); } } break; case 7: /* Jacobi (uses ParMatvec) */ { /*----------------------------------------------------------------- * Copy f into temporary vector. *-----------------------------------------------------------------*/ hypre_ParVectorCopy(f,Vtemp); /*----------------------------------------------------------------- * Perform Matvec Vtemp=f-Au *-----------------------------------------------------------------*/ hypre_ParCSRMatrixMatvec(-1.0,A, u, 1.0, Vtemp); for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (A_diag_data[A_diag_i[i]] != zero) { u_data[i] += relax_weight * Vtemp_data[i] / A_diag_data[A_diag_i[i]]; } } } break; case 8: /* hybrid L1 Symm. Gauss-Seidel */ { if (num_threads > 1) { Ztemp_local = hypre_ParVectorLocalVector(Ztemp); Ztemp_data = hypre_VectorData(Ztemp_local); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. 
*-----------------------------------------------------------------*/ if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(double, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); Vext_data = hypre_CTAlloc(double,num_cols_offd); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ if (relax_weight == 1 && omega == 1) { if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } prod = (1.0-relax_weight*omega); if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res0 = 0.0; res = f_data[i]; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res0 = 0.0; res = f_data[i]; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res2 += A_diag_data[jj] * Vtemp_data[ii]; res0 -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res2 += A_diag_data[jj] * Vtemp_data[ii]; res0 -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } } if (num_procs > 1) { hypre_TFree(Vext_data); hypre_TFree(v_buf_data); } } break; case 9: /* Direct solve: use gaussian elimination */ { /*----------------------------------------------------------------- * Generate CSR matrix from ParCSRMatrix A *-----------------------------------------------------------------*/ #ifdef HYPRE_NO_GLOBAL_PARTITION /* all processors are needed for these routines */ A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A); f_vector = hypre_ParVectorToVectorAll(f); if (n) { #else if (n) { A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A); f_vector = hypre_ParVectorToVectorAll(f); #endif A_CSR_i = hypre_CSRMatrixI(A_CSR); A_CSR_j = hypre_CSRMatrixJ(A_CSR); A_CSR_data = hypre_CSRMatrixData(A_CSR); f_vector_data = hypre_VectorData(f_vector); A_mat = hypre_CTAlloc(double, n_global*n_global); b_vec = hypre_CTAlloc(double, n_global); /*--------------------------------------------------------------- * Load CSR matrix into A_mat. 
*---------------------------------------------------------------*/ for (i = 0; i < n_global; i++) { for (jj = A_CSR_i[i]; jj < A_CSR_i[i+1]; jj++) { column = A_CSR_j[jj]; A_mat[i*n_global+column] = A_CSR_data[jj]; } b_vec[i] = f_vector_data[i]; } relax_error = gselim(A_mat,b_vec,n_global); for (i = 0; i < n; i++) { u_data[i] = b_vec[first_index+i]; } hypre_TFree(A_mat); hypre_TFree(b_vec); hypre_CSRMatrixDestroy(A_CSR); A_CSR = NULL; hypre_SeqVectorDestroy(f_vector); f_vector = NULL; } #ifdef HYPRE_NO_GLOBAL_PARTITION else { hypre_CSRMatrixDestroy(A_CSR); A_CSR = NULL; hypre_SeqVectorDestroy(f_vector); f_vector = NULL; } #endif } break; case 99: /* Direct solve: use gaussian elimination & BLAS (with pivoting) */ { HYPRE_Int info; HYPRE_Int one_i = 1; HYPRE_Int *piv; /*----------------------------------------------------------------- * Generate CSR matrix from ParCSRMatrix A *-----------------------------------------------------------------*/ #ifdef HYPRE_NO_GLOBAL_PARTITION /* all processors are needed for these routines */ A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A); f_vector = hypre_ParVectorToVectorAll(f); if (n) { #else if (n) { A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A); f_vector = hypre_ParVectorToVectorAll(f); #endif A_CSR_i = hypre_CSRMatrixI(A_CSR); A_CSR_j = hypre_CSRMatrixJ(A_CSR); A_CSR_data = hypre_CSRMatrixData(A_CSR); f_vector_data = hypre_VectorData(f_vector); A_mat = hypre_CTAlloc(double, n_global*n_global); b_vec = hypre_CTAlloc(double, n_global); /*--------------------------------------------------------------- * Load CSR matrix into A_mat. 
*---------------------------------------------------------------*/ for (i = 0; i < n_global; i++) { for (jj = A_CSR_i[i]; jj < A_CSR_i[i+1]; jj++) { /* need col major */ column = A_CSR_j[jj]; A_mat[i + n_global*column] = A_CSR_data[jj]; } b_vec[i] = f_vector_data[i]; } piv = hypre_CTAlloc(HYPRE_Int, n_global); /* write over A with LU */ #ifdef HYPRE_USING_ESSL dgetrf(n_global, n_global, A_mat, n_global, piv, &info); #else hypre_F90_NAME_LAPACK(dgetrf, DGETRF)(&n_global, &n_global, A_mat, &n_global, piv, &info); #endif /*now b_vec = inv(A)*b_vec */ #ifdef HYPRE_USING_ESSL dgetrs("N", n_global, &one_i, A_mat, n_global, piv, b_vec, n_global, &info); #else hypre_F90_NAME_LAPACK(dgetrs, DGETRS)("N", &n_global, &one_i, A_mat, &n_global, piv, b_vec, &n_global, &info); #endif hypre_TFree(piv); for (i = 0; i < n; i++) { u_data[i] = b_vec[first_index+i]; } hypre_TFree(A_mat); hypre_TFree(b_vec); hypre_CSRMatrixDestroy(A_CSR); A_CSR = NULL; hypre_SeqVectorDestroy(f_vector); f_vector = NULL; } #ifdef HYPRE_NO_GLOBAL_PARTITION else { hypre_CSRMatrixDestroy(A_CSR); A_CSR = NULL; hypre_SeqVectorDestroy(f_vector); f_vector = NULL; } #endif } break; } return(relax_error); } /*------------------------------------------------------------------------- * * Gaussian Elimination * *------------------------------------------------------------------------ */ HYPRE_Int hypre_GaussElimSetup (hypre_ParAMGData *amg_data, HYPRE_Int level, HYPRE_Int relax_type) { /* Par Data Structure variables */ hypre_ParCSRMatrix *A = hypre_ParAMGDataAArray(amg_data)[level]; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); MPI_Comm comm = hypre_ParCSRMatrixComm(A); MPI_Comm new_comm; /* Generate sub communicator */ hypre_GenerateSubComm(comm, num_rows, &new_comm); if (num_rows) { hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A); 
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); double *A_diag_data = hypre_CSRMatrixData(A_diag); double *A_offd_data = hypre_CSRMatrixData(A_offd); double *A_mat, *A_mat_local; HYPRE_Int *comm_info, *info, *displs; HYPRE_Int *mat_info, *mat_displs; HYPRE_Int new_num_procs, A_mat_local_size, i, jj, column; HYPRE_Int first_row_index = hypre_ParCSRMatrixFirstRowIndex(A); hypre_MPI_Comm_size(new_comm, &new_num_procs); comm_info = hypre_CTAlloc(HYPRE_Int, 2*new_num_procs+1); mat_info = hypre_CTAlloc(HYPRE_Int, new_num_procs); mat_displs = hypre_CTAlloc(HYPRE_Int, new_num_procs+1); info = &comm_info[0]; displs = &comm_info[new_num_procs]; hypre_MPI_Allgather(&num_rows, 1, HYPRE_MPI_INT, info, 1, HYPRE_MPI_INT, new_comm); displs[0] = 0; mat_displs[0] = 0; for (i=0; i < new_num_procs; i++) { displs[i+1] = displs[i]+info[i]; mat_displs[i+1] = global_num_rows*displs[i+1]; mat_info[i] = global_num_rows*info[i]; } hypre_ParAMGDataBVec(amg_data) = hypre_CTAlloc(double, global_num_rows); A_mat_local_size = global_num_rows*num_rows; A_mat_local = hypre_CTAlloc(double, A_mat_local_size); A_mat = hypre_CTAlloc(double, global_num_rows*global_num_rows); /* load local matrix into A_mat_local */ for (i = 0; i < num_rows; i++) { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { /* need col major */ column = A_diag_j[jj]+first_row_index; A_mat_local[i*global_num_rows + column] = A_diag_data[jj]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { /* need col major */ column = col_map_offd[A_offd_j[jj]]; A_mat_local[i*global_num_rows + column] = A_offd_data[jj]; } } hypre_MPI_Allgatherv( A_mat_local, A_mat_local_size, hypre_MPI_DOUBLE, A_mat, mat_info, mat_displs, hypre_MPI_DOUBLE, new_comm); if (relax_type == 99) { double *AT_mat; AT_mat = hypre_CTAlloc(double, global_num_rows*global_num_rows); for (i=0; i < global_num_rows; i++) for (jj=0; 
jj < global_num_rows; jj++) AT_mat[i*global_num_rows + jj] = A_mat[i+ jj*global_num_rows]; hypre_ParAMGDataAMat(amg_data) = AT_mat; hypre_TFree (A_mat); } else hypre_ParAMGDataAMat(amg_data) = A_mat; hypre_ParAMGDataCommInfo(amg_data) = comm_info; hypre_ParAMGDataNewComm(amg_data) = new_comm; hypre_TFree(mat_info); hypre_TFree(mat_displs); hypre_TFree(A_mat_local); } return hypre_error_flag; } HYPRE_Int hypre_GaussElimSolve (void *amg_vdata, HYPRE_Int level, HYPRE_Int relax_type) { hypre_ParAMGData *amg_data = amg_vdata; hypre_ParCSRMatrix *A = hypre_ParAMGDataAArray(amg_data)[level]; HYPRE_Int n = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); HYPRE_Int error_flag = 0; if (n) { MPI_Comm new_comm = hypre_ParAMGDataNewComm(amg_data); hypre_ParVector *f = hypre_ParAMGDataFArray(amg_data)[level]; hypre_ParVector *u = hypre_ParAMGDataUArray(amg_data)[level]; double *A_mat = hypre_ParAMGDataAMat(amg_data); double *b_vec = hypre_ParAMGDataBVec(amg_data); double *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f)); double *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u)); double *A_tmp; HYPRE_Int *comm_info = hypre_ParAMGDataCommInfo(amg_data); HYPRE_Int *displs, *info; HYPRE_Int n_global = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int new_num_procs, i, my_info; HYPRE_Int first_index = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int one_i = 1; hypre_MPI_Comm_size(new_comm, &new_num_procs); info = &comm_info[0]; displs = &comm_info[new_num_procs]; hypre_MPI_Allgatherv ( f_data, n, hypre_MPI_DOUBLE, b_vec, info, displs, hypre_MPI_DOUBLE, new_comm ); A_tmp = hypre_CTAlloc (double, n_global*n_global); for (i=0; i < n_global*n_global; i++) A_tmp[i] = A_mat[i]; if (relax_type == 9) { error_flag = gselim(A_tmp,b_vec,n_global); } else if (relax_type == 99) /* use pivoting */ { HYPRE_Int *piv; piv = hypre_CTAlloc(HYPRE_Int, n_global); /* write over A with LU */ #ifdef HYPRE_USING_ESSL dgetrf(n_global, n_global, A_tmp, n_global, piv, &my_info); #else 
hypre_F90_NAME_LAPACK(dgetrf, DGETRF)(&n_global, &n_global, A_tmp, &n_global, piv, &my_info); #endif /*now b_vec = inv(A)*b_vec */ #ifdef HYPRE_USING_ESSL dgetrs("N", n_global, &one_i, A_tmp, n_global, piv, b_vec, n_global, &my_info); #else hypre_F90_NAME_LAPACK(dgetrs, DGETRS)("N", &n_global, &one_i, A_tmp, &n_global, piv, b_vec, &n_global, &my_info); #endif hypre_TFree(piv); } for (i = 0; i < n; i++) { u_data[i] = b_vec[first_index+i]; } hypre_TFree(A_tmp); } if (error_flag) hypre_error(HYPRE_ERROR_GENERIC); return hypre_error_flag; } HYPRE_Int gselim(A,x,n) double *A; double *x; HYPRE_Int n; { HYPRE_Int err_flag = 0; HYPRE_Int j,k,m; double factor; if (n==1) /* A is 1x1 */ { if (A[0] != 0.0) { x[0] = x[0]/A[0]; return(err_flag); } else { err_flag = 1; return(err_flag); } } else /* A is nxn. Forward elimination */ { for (k = 0; k < n-1; k++) { if (A[k*n+k] != 0.0) { for (j = k+1; j < n; j++) { if (A[j*n+k] != 0.0) { factor = A[j*n+k]/A[k*n+k]; for (m = k+1; m < n; m++) { A[j*n+m] -= factor * A[k*n+m]; } /* Elimination step for rhs */ x[j] -= factor * x[k]; } } } } /* Back Substitution */ for (k = n-1; k > 0; --k) { x[k] /= A[k*n+k]; for (j = 0; j < k; j++) { if (A[j*n+k] != 0.0) { x[j] -= x[k] * A[j*n+k]; } } } x[0] /= A[0]; return(err_flag); } }
flatsky_utils.c
#include "utils.h"
#include <fitsio.h>

/* Allocate n bytes through FFTW's aligned allocator (fftwf_malloc in the
 * single-precision _SPREC build, fftw_malloc otherwise).  Aborts via
 * report_error on allocation failure, so the return value is never NULL. */
void *dftw_malloc(size_t n)
{
#ifdef _SPREC
  void *p=fftwf_malloc(n);
#else //_SPREC
  void *p=fftw_malloc(n);
#endif //_SPREC
  if(p==NULL)
    report_error(NMT_ERROR_MEMORY,"Ran out of memory\n");
  return p;
}

/* Release memory obtained from dftw_malloc. */
void dftw_free(void *p)
{
#ifdef _SPREC
  fftwf_free(p);
#else //_SPREC
  fftw_free(p);
#endif //_SPREC
}

/* Pixel-wise product: mp_out[ip] = mp1[ip]*mp2[ip] over all fs->npix pixels. */
void fs_map_product(nmt_flatsky_info *fs,flouble *mp1,flouble *mp2,flouble *mp_out)
{
#pragma omp parallel default(none) shared(fs,mp1,mp2,mp_out)
  {
    long ip;

#pragma omp for
    for(ip=0;ip<fs->npix;ip++) {
      mp_out[ip]=mp1[ip]*mp2[ip];
    } //end omp for
  } //end omp parallel
}

/* Dot product of two maps weighted by the pixel area
 * (a discrete approximation to the patch integral of mp1*mp2).
 * Accumulation is done in double precision per thread, then reduced. */
flouble fs_map_dot(nmt_flatsky_info *fs,flouble *mp1,flouble *mp2)
{
  double sum=0;

#pragma omp parallel default(none) shared(mp1,mp2,sum,fs)
  {
    long ip;
    double sum_thr=0;

#pragma omp for
    for(ip=0;ip<fs->npix;ip++) {
      sum_thr+=mp1[ip]*mp2[ip];
    } //end omp for

#pragma omp critical
    {
      sum+=sum_thr;
    } //end omp critical
  } //end omp parallel

  return (flouble)(sum*fs->pixsize);
}

/* In-place rotation of a pair of Fourier-space maps from the spin (Q,U-like)
 * basis into the (E,B) basis.  alm[0] and alm[1] hold the two components on
 * the half-complex r2c grid of size ny*(nx/2+1).  For spin==0 the overall
 * sign flips to +1 (the rotation is trivial). */
static void qu2eb(nmt_flatsky_info *fs,int spin,fcomplex **alm)
{
  int sig_overall=-1;
  if(spin==0)
    sig_overall=1;

#pragma omp parallel default(none) shared(fs,spin,alm,sig_overall)
  {
    int iy;
    fcomplex sig=sig_overall*cpow(I,spin);
    flouble dkx=2*M_PI/fs->lx;
    flouble dky=2*M_PI/fs->ly;

#pragma omp for
    for(iy=0;iy<fs->ny;iy++) {
      int ix;
      flouble ky;
      //Frequencies above ny/2 are negative (standard FFT ordering)
      if(2*iy<=fs->ny)
        ky=iy*dky;
      else
        ky=-(fs->ny-iy)*dky;
      for(ix=0;ix<=fs->nx/2;ix++) {
        flouble csphi,ssphi,cph,sph;
        fcomplex e,b;
        int s=0;
        flouble kx=ix*dkx;
        long index=ix+(fs->nx/2+1)*iy;
        flouble kmod2=kx*kx+ky*ky;
        if(kmod2<=0) {
          cph=1;
          sph=0;
        }
        else {
          flouble i_kmod=1./sqrt(kmod2);
          cph=kx*i_kmod;
          sph=ky*i_kmod;
        }
        //Build cos(spin*phi_k), sin(spin*phi_k) by repeated angle addition
        csphi=1;
        ssphi=0;
        while(s<spin) {
          flouble c2=csphi*cph-ssphi*sph;
          flouble s2=ssphi*cph+csphi*sph;
          csphi=c2;
          ssphi=s2;
          s++;
        }
        e=sig*(alm[0][index]*csphi-alm[1][index]*ssphi);
        b=sig*(alm[0][index]*ssphi+alm[1][index]*csphi);
        alm[0][index]=e;
        alm[1][index]=b;
      }
    } //end omp for
  } //end omp parallel
}

/* Inverse of qu2eb: rotate a pair of Fourier-space maps from the (E,B)
 * basis back into the spin (Q,U-like) basis, in place. */
static void eb2qu(nmt_flatsky_info *fs,int spin,fcomplex **alm)
{
  int sig_overall=-1;
  if(spin==0)
    sig_overall=1;

#pragma omp parallel default(none) shared(fs,spin,alm,sig_overall)
  {
    int iy;
    fcomplex sig=sig_overall*cpow(-I,spin);
    flouble dkx=2*M_PI/fs->lx;
    flouble dky=2*M_PI/fs->ly;

#pragma omp for
    for(iy=0;iy<fs->ny;iy++) {
      int ix;
      flouble ky;
      if(2*iy<=fs->ny)
        ky=iy*dky;
      else
        ky=-(fs->ny-iy)*dky;
      for(ix=0;ix<=fs->nx/2;ix++) {
        flouble csphi,ssphi,cph,sph;
        fcomplex q,u;
        int s=0;
        flouble kx=ix*dkx;
        long index=ix+(fs->nx/2+1)*iy;
        flouble kmod2=kx*kx+ky*ky;
        if(kmod2<=0) {
          cph=1;
          sph=0;
        }
        else {
          flouble i_kmod=1./sqrt(kmod2);
          cph=kx*i_kmod;
          sph=ky*i_kmod;
        }
        //Build cos(spin*phi_k), sin(spin*phi_k) by repeated angle addition
        csphi=1;
        ssphi=0;
        while(s<spin) {
          flouble c2=csphi*cph-ssphi*sph;
          flouble s2=ssphi*cph+csphi*sph;
          csphi=c2;
          ssphi=s2;
          s++;
        }
        q=sig*( alm[0][index]*csphi+alm[1][index]*ssphi);
        u=sig*(-alm[0][index]*ssphi+alm[1][index]*csphi);
        alm[0][index]=q;
        alm[1][index]=u;
      }
    } //end omp for
  } //end omp parallel
}

/* Forward (map -> Fourier) transform of ntrans sets of maps.
 * Each transform involves one map for spin==0 and two for spin!=0.
 * Output Fourier coefficients are normalized by lx*ly/(2*pi*nx*ny);
 * spin!=0 pairs are rotated from (Q,U) into (E,B) at the end. */
void fs_map2alm(nmt_flatsky_info *fs,int ntrans,int spin,flouble **map,fcomplex **alm)
{
  //TODO init threads??
#ifdef _SPREC
  fftwf_plan plan_ft;
#else //_SPREC
  fftw_plan plan_ft;
#endif //_SPREC
  int imap,nmaps=1;
  if(spin) nmaps=2;

  for(imap=0;imap<nmaps*ntrans;imap++) {
#ifdef _SPREC
    plan_ft=fftwf_plan_dft_r2c_2d(fs->ny,fs->nx,map[imap],alm[imap],FFTW_ESTIMATE);
    fftwf_execute(plan_ft);
    fftwf_destroy_plan(plan_ft);
#else //_SPREC
    plan_ft=fftw_plan_dft_r2c_2d(fs->ny,fs->nx,map[imap],alm[imap],FFTW_ESTIMATE);
    fftw_execute(plan_ft);
    fftw_destroy_plan(plan_ft);
#endif //_SPREC
#pragma omp parallel default(none) shared(fs,alm,imap)
    {
      long ipix;
      flouble norm=fs->lx*fs->ly/(2*M_PI*fs->nx*fs->ny);

#pragma omp for
      for(ipix=0;ipix<fs->ny*(fs->nx/2+1);ipix++) {
        alm[imap][ipix]*=norm;
      } //end omp for
    } //end omp parallel
  }

  if(nmaps>1) { //Q,U -> E,B
    for(imap=0;imap<ntrans*nmaps;imap+=nmaps)
      qu2eb(fs,spin,&(alm[imap]));
  }
}

/* Inverse (Fourier -> map) transform of ntrans sets of coefficients.
 * spin!=0 pairs are rotated (E,B)->(Q,U) before the FFT and back after it.
 * Output maps are normalized by 2*pi/(lx*ly). */
void fs_alm2map(nmt_flatsky_info *fs,int ntrans,int spin,flouble **map,fcomplex **alm)
{
  //TODO init threads??
#ifdef _SPREC
  fftwf_plan plan_ft;
#else //_SPREC
  fftw_plan plan_ft;
#endif //_SPREC
  int imap,nmaps=1;
  if(spin) nmaps=2;

  if(nmaps>1) { //E,B -> Q,U
    for(imap=0;imap<ntrans*nmaps;imap+=nmaps)
      eb2qu(fs,spin,&(alm[imap]));
  }

  for(imap=0;imap<nmaps*ntrans;imap++) {
#ifdef _SPREC
    plan_ft=fftwf_plan_dft_c2r_2d(fs->ny,fs->nx,alm[imap],map[imap],FFTW_ESTIMATE);
    fftwf_execute(plan_ft);
    fftwf_destroy_plan(plan_ft);
#else //_SPREC
    plan_ft=fftw_plan_dft_c2r_2d(fs->ny,fs->nx,alm[imap],map[imap],FFTW_ESTIMATE);
    fftw_execute(plan_ft);
    fftw_destroy_plan(plan_ft);
#endif //_SPREC
#pragma omp parallel default(none) shared(fs,map,imap)
    {
      long ipix;
      flouble norm=2*M_PI/(fs->lx*fs->ly);

#pragma omp for
      for(ipix=0;ipix<fs->npix;ipix++) {
        map[imap][ipix]*=norm;
      } //end omp for
    } //end omp parallel
  }

  if(nmaps>1) { //Q,U -> E,B
    //NOTE(review): FFTW's c2r transforms may overwrite their input array,
    //so this restoration of (E,B) from the post-transform alm contents
    //should be confirmed against upstream behavior.
    for(imap=0;imap<ntrans*nmaps;imap+=nmaps)
      qu2eb(fs,spin,&(alm[imap]));
  }
}

#define SAMP_RATE_SIGMA 128
#define FWHM2SIGMA_FLAT 0.00012352884853326381

/* Build a Gaussian beam window function B(l)=exp(-l^2*sigma^2/2) for a
 * beam of the given FWHM (in arcmin), tabulated on 5*SAMP_RATE_SIGMA
 * sample points out to l = 5/sigma.  Caller owns the returned object. */
nmt_k_function *fs_generate_beam_window(double fwhm_amin)
{
  int ii;
  nmt_k_function *beam;
  flouble *larr=my_malloc(5*SAMP_RATE_SIGMA*sizeof(flouble));
  flouble *farr=my_malloc(5*SAMP_RATE_SIGMA*sizeof(flouble));
  double sigma=FWHM2SIGMA_FLAT*fwhm_amin;

  for(ii=0;ii<5*SAMP_RATE_SIGMA;ii++) {
    flouble l=(ii+0.0)/(SAMP_RATE_SIGMA*sigma);
    larr[ii]=l;
    farr[ii]=exp(-0.5*l*l*sigma*sigma);
  }
  beam=nmt_k_function_alloc(5*SAMP_RATE_SIGMA,larr,farr,1.,0.,0);
  free(larr);
  free(farr);

  return beam;
}

/* Zero all ny*(nx/2+1) Fourier coefficients of a half-complex array. */
void fs_zero_alm(nmt_flatsky_info *fs,fcomplex *alm)
{
#pragma omp parallel default(none) shared(fs,alm)
  {
    int ii;

#pragma omp for
    for(ii=0;ii<fs->ny*(fs->nx/2+1);ii++) {
      alm[ii]=0;
    } //end omp for
  } //end omp parallel
}

/* Multiply alm_in by a beam window and write (add_to_out==0) or
 * accumulate (add_to_out!=0) the result into alm_out.  If window is NULL,
 * a Gaussian beam of FWHM fwhm_amin is generated (and freed) internally;
 * otherwise the provided window is used and left untouched. */
void fs_alter_alm(nmt_flatsky_info *fs,double fwhm_amin,fcomplex *alm_in,fcomplex *alm_out,
                  nmt_k_function *window,int add_to_out)
{
  nmt_k_function *beam;
  if(window==NULL)
    beam=fs_generate_beam_window(fwhm_amin);
  else
    beam=window;

#pragma omp parallel default(none) shared(fs,alm_in,alm_out,beam,add_to_out)
  {
    int iy;
    flouble dkx=2*M_PI/fs->lx;
    flouble dky=2*M_PI/fs->ly;
    //Per-thread interpolation accelerator (GSL accels are not thread-safe)
    gsl_interp_accel *intacc_thr=gsl_interp_accel_alloc();

#pragma omp for
    for(iy=0;iy<fs->ny;iy++) {
      int ix;
      flouble ky;
      if(2*iy<=fs->ny)
        ky=iy*dky;
      else
        ky=-(fs->ny-iy)*dky;
      for(ix=0;ix<=fs->nx/2;ix++) {
        flouble kx=ix*dkx;
        long index=ix+(fs->nx/2+1)*iy;
        flouble kmod=sqrt(kx*kx+ky*ky);
        if(add_to_out)
          alm_out[index]+=alm_in[index]*nmt_k_function_eval(beam,kmod,intacc_thr);
        else
          alm_out[index]=alm_in[index]*nmt_k_function_eval(beam,kmod,intacc_thr);
      }
    } //end omp for
    gsl_interp_accel_free(intacc_thr);
  } //end omp parallel

  if(window==NULL)
    nmt_k_function_free(beam);
}

/* Bin the cross-power of two sets of Fourier coefficients into cls.
 * cls[i2+nmaps_2*i1][band] receives the average of
 * Re(alm1)*Re(alm2)+Im(alm1)*Im(alm2) over the modes in each band,
 * normalized by 4*pi^2/(lx*ly).  Modes with kx in [lmn_x,lmx_x] or
 * ky in [lmn_y,lmx_y] are skipped (pass an empty interval such as
 * (1,-1) to keep every mode).  Empty bands are set to zero. */
void fs_alm2cl(nmt_flatsky_info *fs,nmt_binning_scheme_flat *bin,
               fcomplex **alms_1,fcomplex **alms_2,int pol_1,int pol_2,flouble **cls,
               flouble lmn_x,flouble lmx_x,flouble lmn_y,flouble lmx_y)
{
  int i1,nmaps_1=1,nmaps_2=1;
  int *n_cells=my_malloc(bin->n_bands*sizeof(int));
  if(pol_1) nmaps_1=2;
  if(pol_2) nmaps_2=2;

  for(i1=0;i1<nmaps_1;i1++) {
    int i2;
    fcomplex *alm1=alms_1[i1];
    for(i2=0;i2<nmaps_2;i2++) {
      int il;
      fcomplex *alm2=alms_2[i2];
      int index_cl=i2+nmaps_2*i1;
      flouble norm_factor=4*M_PI*M_PI/(fs->lx*fs->ly);
      for(il=0;il<bin->n_bands;il++) {
        cls[index_cl][il]=0;
        n_cells[il]=0;
      }
#pragma omp parallel default(none) \
  shared(fs,bin,alm1,alm2,index_cl,cls) \
  shared(lmn_x,lmx_x,lmn_y,lmx_y,n_cells)
      {
        int iy;
        flouble dkx=2*M_PI/fs->lx;
        flouble dky=2*M_PI/fs->ly;

#pragma omp for
        for(iy=0;iy<fs->ny;iy++) {
          int ix;
          flouble ky;
          int ik=0;
          if(2*iy<=fs->ny)
            ky=iy*dky;
          else
            ky=-(fs->ny-iy)*dky;
          if((ky>=lmn_y) && (ky<=lmx_y))
            continue;
          for(ix=0;ix<fs->nx;ix++) {
            int ix_here;
            long index;
            flouble kmod,kx;
            //Fold negative-kx modes onto the half-complex grid
            if(2*ix<=fs->nx) {
              kx=ix*dkx;
              ix_here=ix;
            }
            else {
              kx=-(fs->nx-ix)*dkx;
              ix_here=fs->nx-ix;
            }
            if((kx>=lmn_x) && (kx<=lmx_x))
              continue;
            index=ix_here+(fs->nx/2+1)*iy;
            kmod=sqrt(kx*kx+ky*ky);
            //ik carries the previous hit as a search hint
            ik=nmt_bins_flat_search_fast(bin,kmod,ik);
            if(ik>=0) {
#pragma omp atomic
              cls[index_cl][ik]+=(creal(alm1[index])*creal(alm2[index])+cimag(alm1[index])*cimag(alm2[index]));
#pragma omp atomic
              n_cells[ik]++;
            }
          }
        } //end omp for
      } //end omp parallel
      for(il=0;il<bin->n_bands;il++) {
        if(n_cells[il]<=0)
          cls[index_cl][il]=0;
        else
          cls[index_cl][il]*=norm_factor/n_cells[il];
      }
    }
  }

  free(n_cells);
}

/* Compute binned (cross-)power spectra of two sets of maps.
 * Transforms both inputs (sharing the coefficients when maps_1==maps_2)
 * and bins their cross-power with no mode cuts. */
void fs_anafast(nmt_flatsky_info *fs,nmt_binning_scheme_flat *bin,
                flouble **maps_1,flouble **maps_2,int pol_1,int pol_2,flouble **cls)
{
  int i1;
  fcomplex **alms_1,**alms_2;
  int nmaps_1=1,nmaps_2=1;
  if(pol_1) nmaps_1=2;
  if(pol_2) nmaps_2=2;

  alms_1=my_malloc(nmaps_1*sizeof(fcomplex *));
  for(i1=0;i1<nmaps_1;i1++)
    alms_1[i1]=dftw_malloc(fs->ny*(fs->nx/2+1)*sizeof(fcomplex));
  fs_map2alm(fs,1,2*pol_1,maps_1,alms_1);
  if(maps_1==maps_2)
    alms_2=alms_1;
  else {
    alms_2=my_malloc(nmaps_2*sizeof(fcomplex *));
    for(i1=0;i1<nmaps_2;i1++)
      alms_2[i1]=dftw_malloc(fs->ny*(fs->nx/2+1)*sizeof(fcomplex));
    fs_map2alm(fs,1,2*pol_2,maps_2,alms_2);
  }

  //Empty cut intervals (1,-1): keep all modes
  fs_alm2cl(fs,bin,alms_1,alms_2,pol_1,pol_2,cls,1.,-1.,1.,-1.);

  for(i1=0;i1<nmaps_1;i1++)
    dftw_free(alms_1[i1]);
  free(alms_1);
  if(maps_1!=maps_2) {
    for(i1=0;i1<nmaps_2;i1++)
      dftw_free(alms_2[i1]);
    free(alms_2);
  }
}

/* Draw a Gaussian realization of nmaps correlated Fourier-space fields on
 * an nx*ny grid of physical size lx*ly.  cells holds the upper-triangular
 * list of cross-power-spectrum functions, beam one window per map, and
 * seed the base RNG seed (offset per thread).  The covariance square root
 * is taken via symmetric eigen-decomposition at each k, and the reality
 * conditions of the r2c layout are enforced on the kx=0 column.  Returns a
 * newly allocated array of nmaps half-complex grids; caller frees each
 * with dftw_free and the array with free. */
fcomplex **fs_synalm(int nx,int ny,flouble lx,flouble ly,int nmaps,
                     nmt_k_function **cells,nmt_k_function **beam,int seed)
{
  int imap;
  fcomplex **alms;
  alms=my_malloc(nmaps*sizeof(fcomplex *));
  for(imap=0;imap<nmaps;imap++)
    alms[imap]=dftw_malloc(ny*(nx/2+1)*sizeof(fcomplex));

  //Switch off error handler for Cholesky decomposition
  gsl_error_handler_t *geh=gsl_set_error_handler_off();

  int numthr=0;
#pragma omp parallel default(none) \
  shared(nx,ny,lx,ly,nmaps,cells,beam,seed,alms,numthr)
  {
    //This is to avoid using the omp.h library
    int ithr;
#pragma omp critical
    {
      ithr=numthr;
      numthr++;
    }

    int iy;
    double dkx=2*M_PI/lx,dky=2*M_PI/ly;
    double inv_dkvol=1./(dkx*dky);
    gsl_vector *rv1=gsl_vector_alloc(nmaps);
    gsl_vector *iv1=gsl_vector_alloc(nmaps);
    gsl_vector *rv2=gsl_vector_alloc(nmaps);
    gsl_vector *iv2=gsl_vector_alloc(nmaps);
    gsl_matrix *clmat=gsl_matrix_calloc(nmaps,nmaps);
    gsl_vector *eval =gsl_vector_alloc(nmaps);
    gsl_matrix *evec =gsl_matrix_alloc(nmaps,nmaps);
    gsl_eigen_symmv_workspace *wsym=gsl_eigen_symmv_alloc(nmaps);
    unsigned int seed_thr=(unsigned int)(seed+ithr);
    gsl_rng *rng=init_rng(seed_thr);
    gsl_interp_accel *intacc_cells=gsl_interp_accel_alloc();
    gsl_interp_accel *intacc_beam=gsl_interp_accel_alloc();

#pragma omp for
    for(iy=0;iy<ny;iy++) {
      int ix;
      flouble ky;
      if(2*iy<=ny)
        ky=iy*dky;
      else
        ky=-(ny-iy)*dky;
      for(ix=0;ix<=nx/2;ix++) {
        int imp1,imp2;
        flouble kx=ix*dkx;
        long index=ix+(nx/2+1)*iy;
        flouble kmod=sqrt(kx*kx+ky*ky);
        //NOTE(review): kmod is a sqrt, so kmod<0 never holds and this
        //branch is dead; if the k=0 (DC) mode was meant to be zeroed,
        //the condition should be kmod<=0 — confirm against upstream.
        if(kmod<0) {
          for(imp1=0;imp1<nmaps;imp1++)
            alms[imp1][index]=0;
        }
        else {
          //Get power spectrum
          int icl=0;
          for(imp1=0;imp1<nmaps;imp1++) {
            for(imp2=imp1;imp2<nmaps;imp2++) {//Fill up only lower triangular part
              flouble cl=0.5*inv_dkvol*nmt_k_function_eval(cells[icl],kmod,intacc_cells);
              gsl_matrix_set(clmat,imp1,imp2,cl);
              if(imp2!=imp1)
                gsl_matrix_set(clmat,imp2,imp1,cl);
              icl++;
            }
          }

          //Take square root
          gsl_eigen_symmv(clmat,eval,evec,wsym);
          for(imp1=0;imp1<nmaps;imp1++) {
            double dr,di;
            //At the same time get white random numbers
            rng_gauss(rng,&dr,&di);
            gsl_vector_set(rv1,imp1,dr);
            gsl_vector_set(iv1,imp1,di);
            for(imp2=0;imp2<nmaps;imp2++) {
              double oij=gsl_matrix_get(evec,imp1,imp2);
              double lambda=gsl_vector_get(eval,imp2);
              //Clip tiny negative eigenvalues to keep the sqrt real
              if(lambda<=0) lambda=0;
              else lambda=sqrt(lambda);
              gsl_matrix_set(clmat,imp1,imp2,oij*lambda);
            }
          }

          //Get correlate random numbers
          gsl_blas_dgemv(CblasNoTrans,1.,clmat,rv1,0,rv2);
          gsl_blas_dgemv(CblasNoTrans,1.,clmat,iv1,0,iv2);
          for(imp1=0;imp1<nmaps;imp1++) {
            flouble bm=nmt_k_function_eval(beam[imp1],kmod,intacc_beam);
            flouble a_re=bm*gsl_vector_get(rv2,imp1);
            flouble a_im=bm*gsl_vector_get(iv2,imp1);
            if(ix==0) {
              //kx=0 column: enforce Hermitian symmetry of a real field.
              //Rows iy>ny/2 are conjugates of rows ny-iy, filled below.
              if(iy>ny/2)
                continue;
              else {
                if(iy==0)
                  alms[imp1][index]=(fcomplex)(M_SQRT2*a_re+I*0*a_im);
                else {
                  int iyy=ny-iy;
                  alms[imp1][index]=(fcomplex)(a_re+I*a_im);
                  alms[imp1][ix+(nx/2+1)*iyy]=(fcomplex)(a_re-I*a_im);
                }
              }
            }
            else
              alms[imp1][index]=(fcomplex)(a_re+I*a_im);
          }
        }
      }
    } //omp end for
    gsl_vector_free(rv1);
    gsl_vector_free(iv1);
    gsl_vector_free(rv2);
    gsl_vector_free(iv2);
    gsl_matrix_free(clmat);
    gsl_vector_free(eval);
    gsl_matrix_free(evec);
    gsl_eigen_symmv_free(wsym);
    end_rng(rng);
    gsl_interp_accel_free(intacc_cells);
    gsl_interp_accel_free(intacc_beam);
  } //omp end parallel

  //Restore error handler
  gsl_set_error_handler(geh);

  return alms;
}

/* Read a FITS header key, aborting with NMT_ERROR_READ if it is missing. */
static void read_key(fitsfile *fptr,int dtype,char *key,void *val,int *status)
{
  fits_read_key(fptr,dtype,key,val,NULL,status);
  if(*status)
    report_error(NMT_ERROR_READ,"Key %s not found\n",key);
}

/* Read the nfield-th image HDU (0-based) of a FITS file into a newly
 * allocated map.  On return *nx,*ny hold the grid dimensions and *lx,*ly
 * the patch size in radians (from |NAXISi*CDELTi| degrees).  Undefined
 * pixels are replaced by -999.  Aborts via report_error on any failure;
 * caller frees the returned buffer. */
flouble *fs_read_flat_map(char *fname,int *nx,int *ny,flouble *lx,flouble *ly,int nfield)
{
  fitsfile *fptr;
  int numhdu,hdutype,naxis,naxis1,naxis2;
  double cdelt1,cdelt2;
  flouble nulval=-999;
  int status=0;

  fits_open_file(&fptr,fname,READONLY,&status);
  if(status)
    report_error(NMT_ERROR_FOPEN,"Can't open file %s\n",fname);
  fits_get_num_hdus(fptr,&numhdu,&status);
  if(nfield>=numhdu)
    report_error(NMT_ERROR_READ,"%d-th field doesn't exist\n",nfield);
  fits_movabs_hdu(fptr,nfield+1,&hdutype,&status);
  if(hdutype!=IMAGE_HDU)
    report_error(NMT_ERROR_READ,"Requested HDU is not an image\n");

  //Read patch properties
  read_key(fptr,TINT,"NAXIS",&naxis,&status);
  read_key(fptr,TINT,"NAXIS1",&naxis1,&status);
  read_key(fptr,TINT,"NAXIS2",&naxis2,&status);
  read_key(fptr,TDOUBLE,"CDELT1",&cdelt1,&status);
  read_key(fptr,TDOUBLE,"CDELT2",&cdelt2,&status);
  if(naxis!=2)
    report_error(NMT_ERROR_READ,"Can't find a two-dimensional map\n");
  *nx=naxis1;
  *ny=naxis2;
  *lx=fabs(naxis1*cdelt1)*M_PI/180;
  *ly=fabs(naxis2*cdelt2)*M_PI/180;

  //Read data
  long fpixel[2]={1,1};
  //Fixed: allocate sizeof(flouble) elements (was sizeof(double), which
  //over-allocated 2x in the single-precision _SPREC build)
  flouble *map_out=my_malloc(naxis1*naxis2*sizeof(flouble));
#ifdef _SPREC
  fits_read_pix(fptr,TFLOAT,fpixel,naxis1*naxis2,&nulval,map_out,NULL,&status);
#else //_SPREC
  fits_read_pix(fptr,TDOUBLE,fpixel,naxis1*naxis2,&nulval,map_out,NULL,&status);
#endif //_SPREC
  if(status)
    report_error(NMT_ERROR_READ,"Error reading image from file %s\n",fname);

  fits_close_file(fptr,&status);
  return map_out;
}
moments.c
/* Generated by Cython 0.25.1 */ /* BEGIN: Cython Metadata { "distutils": { "depends": [], "extra_compile_args": [ "-O3", "-ffast-math", "-fopenmp" ], "extra_link_args": [ "-fopenmp" ] }, "module_name": "bmtools.exact.moments" } END: Cython Metadata */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000) #error Cython requires Python 2.6+ or Python 3.2+. #else #define CYTHON_ABI "0_25_1" #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x03030000 || (PY_MAJOR_VERSION == 2 && PY_VERSION_HEX >= 0x02070000) #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 
#undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define 
CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && METH_FASTCALL == PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST))) #else #define 
__Pyx_PyFastCFunction_Check(func) 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define 
PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define 
__Pyx_truncl truncl #endif #define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__bmtools__exact__moments #define __PYX_HAVE_API__bmtools__exact__moments #include "math.h" #include "pythread.h" #include <string.h> #include <stdlib.h> #include <stdio.h> #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define 
__Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) && defined (_M_X64) #define __Pyx_sst_abs(value) _abs64(value) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) #endif static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #else #define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen #endif #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) #define 
__Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, 
PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject 
*__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; static const char *__pyx_f[] = { "bmtools/exact/moments.pyx", "stringsource", }; /* MemviewSliceStruct.proto */ struct __pyx_memoryview_obj; typedef struct { struct __pyx_memoryview_obj *memview; char *data; Py_ssize_t shape[8]; Py_ssize_t strides[8]; Py_ssize_t suboffsets[8]; } __Pyx_memviewslice; /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* Atomics.proto */ #include <pythread.h> #ifndef CYTHON_ATOMICS #define CYTHON_ATOMICS 1 #endif #define __pyx_atomic_int_type int #if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ !defined(__i386__) #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) #ifdef __PYX_DEBUG_ATOMICS #warning "Using GNU atomics" #endif #elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 #include <Windows.h> #undef __pyx_atomic_int_type #define __pyx_atomic_int_type LONG #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) 
InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #pragma message ("Using MSVC atomics") #endif #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #warning "Using Intel atomics" #endif #else #undef CYTHON_ATOMICS #define CYTHON_ATOMICS 0 #ifdef __PYX_DEBUG_ATOMICS #warning "Not using atomics" #endif #endif typedef volatile __pyx_atomic_int_type __pyx_atomic_int; #if CYTHON_ATOMICS #define __pyx_add_acquisition_count(memview)\ __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #else #define __pyx_add_acquisition_count(memview)\ __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #endif /* "bmtools/exact/helpers.pxd":7 * """ * * ctypedef signed char state_t # <<<<<<<<<<<<<< * cdef char* state_t_code * */ typedef signed char __pyx_t_7bmtools_5exact_7helpers_state_t; /*--- Type declarations ---*/ struct __pyx_array_obj; struct __pyx_MemviewEnum_obj; struct __pyx_memoryview_obj; struct __pyx_memoryviewslice_obj; struct __pyx_opt_args_7bmtools_5exact_7moments_calc_norm_const; /* "bmtools/exact/moments.pxd":31 * * * cdef double calc_norm_const(double[:,:] weights, double[:] biases, # <<<<<<<<<<<<<< * state_t[:] state, long start_state_index=?, * long end_state_index=?) 
nogil */ struct __pyx_opt_args_7bmtools_5exact_7moments_calc_norm_const { int __pyx_n; long start_state_index; long end_state_index; }; /* "View.MemoryView":103 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD struct __pyx_vtabstruct_array *__pyx_vtab; char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":275 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":326 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":951 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":103 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_array { PyObject *(*get_memview)(struct __pyx_array_obj *); }; static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; /* "View.MemoryView":326 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ 
struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":951 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = 
__Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if 
(likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* BufferFormatCheck.proto */ static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); // PROTO /* MemviewSliceInit.proto */ #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice 
*memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = PyThreadState_GET(); #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) 
PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* ForceInitThreads.proto */ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif /* ArgTypeTest.proto */ static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* UnaryNegOverflows.proto */ #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject 
*obj, PyTypeObject *type); /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ 
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #endif /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int 
wraparound, int boundscheck); static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace)\ (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); /* WriteUnraisableException.proto */ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback, int nogil); /* PyObjectCallMethO.proto */ #if 
CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; /* None.proto */ static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void 
*p, const char *sig); /* TypeInfoCompare.proto */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(PyObject *); /* None.proto */ static CYTHON_INLINE long __Pyx_pow_long(long, long); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* MemviewDtypeToObject.proto */ static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp); static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* None.proto */ static CYTHON_INLINE Py_ssize_t __Pyx_pow_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_nn___pyx_t_7bmtools_5exact_7helpers_state_t(PyObject *); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE 
__Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_nn___pyx_t_7bmtools_5exact_7helpers_state_t(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* FunctionExport.proto */ static int __Pyx_ExportFunction(const char *name, void (*f)(void), const char *sig); /* PyIdentifierFromString.proto */ #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif /* ModuleImport.proto */ static PyObject *__Pyx_ImportModule(const char *name); /* VoidPtrImport.proto */ static int __Pyx_ImportVoidPtr(PyObject *module, const char *name, void **p, const char *sig); /* FunctionImport.proto */ static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject 
*__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'bmtools.exact.helpers' */ static char **__pyx_vp_7bmtools_5exact_7helpers_state_t_code = 0; #define __pyx_v_7bmtools_5exact_7helpers_state_t_code (*__pyx_vp_7bmtools_5exact_7helpers_state_t_code) static double (*__pyx_f_7bmtools_5exact_7helpers_neg_energy)(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice); /*proto*/ static void (*__pyx_f_7bmtools_5exact_7helpers_check_state_space_size)(int, int, int __pyx_skip_dispatch); /*proto*/ static __Pyx_memviewslice (*__pyx_f_7bmtools_5exact_7helpers_partition_state_space)(long, int); /*proto*/ static void (*__pyx_f_7bmtools_5exact_7helpers_index_to_state)(long, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/ static void (*__pyx_f_7bmtools_5exact_7helpers_next_state)(__Pyx_memviewslice, long); /*proto*/ /* Module declarations from 'cython.view' */ /* Module declarations from 'bmtools.exact.moments' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static void __pyx_f_7bmtools_5exact_7moments_calc_unnormed_probs_for_state_range(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, double *, __Pyx_memviewslice, long, 
long); /*proto*/ static void __pyx_f_7bmtools_5exact_7moments_normalise_probabilities(__Pyx_memviewslice, double); /*proto*/ static void __pyx_f_7bmtools_5exact_7moments_accum_moments_for_state_range(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, double *, __Pyx_memviewslice, __Pyx_memviewslice, long, long); /*proto*/ static void __pyx_f_7bmtools_5exact_7moments_normalise_first_moment(__Pyx_memviewslice, double); /*proto*/ static void __pyx_f_7bmtools_5exact_7moments_combine_and_normalise_first_moments(__Pyx_memviewslice, double); /*proto*/ static void __pyx_f_7bmtools_5exact_7moments_normalise_and_reflect_second_moment(__Pyx_memviewslice, double); /*proto*/ static void __pyx_f_7bmtools_5exact_7moments_combine_normalise_and_reflect_second_moments(__Pyx_memviewslice, double); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ 
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static __Pyx_TypeInfo 
__Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_7bmtools_5exact_7helpers_state_t = { "state_t", NULL, sizeof(__pyx_t_7bmtools_5exact_7helpers_state_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_7bmtools_5exact_7helpers_state_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_7bmtools_5exact_7helpers_state_t), 0 }; #define __Pyx_MODULE_NAME "bmtools.exact.moments" int __pyx_module_is_main_bmtools__exact__moments = 0; /* Implementation of 'bmtools.exact.moments' */ static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_O[] = "O"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_d[] = "d"; static const char __pyx_k_i[] = "i"; static const char __pyx_k_j[] = "j"; static const char __pyx_k_t[] = "t"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_prob[] = "prob"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_force[] = "force"; static const char __pyx_k_probs[] = "probs"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_shape[] = "shape"; 
static const char __pyx_k_start[] = "start"; static const char __pyx_k_state[] = "state"; static const char __pyx_k_biases[] = "biases"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_states[] = "states"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_memview[] = "memview"; static const char __pyx_k_weights[] = "weights"; static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_first_mom[] = "first_mom"; static const char __pyx_k_intervals[] = "intervals"; static const char __pyx_k_num_units[] = "num_units"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_first_moms[] = "first_moms"; static const char __pyx_k_norm_const[] = "norm_const"; static const char __pyx_k_num_states[] = "num_states"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_second_mom[] = "second_mom"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_norm_consts[] = "norm_consts"; static const char __pyx_k_num_threads[] = "num_threads"; static const char __pyx_k_second_moms[] = "second_moms"; static const char __pyx_k_state_index[] = "state_index"; static const char __pyx_k_all_in_place[] = "all_in_place"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static const 
char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_bmtools_exact_moments[] = "bmtools.exact.moments"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_calculate_probs_parallel[] = "calculate_probs_parallel"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_calculate_moments_parallel[] = "calculate_moments_parallel"; static const char __pyx_k_Number_of_threads_must_be_0[] = "Number of threads must be > 0"; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_calculate_moments_sequential[] = "calculate_moments_sequential"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char __pyx_k_home_matt_Projects_boltzmann_ma[] = "/home/matt/Projects/boltzmann-machine-tools/bmtools/exact/moments.pyx"; static const char __pyx_k_Boltzmann_machine_moment_calcula[] = "Boltzmann machine moment calculation.\n\nFunctions for calculating first and second moments of Boltzmann machine\ninvariant distribution exactly given weight and bias parameters. Intended\nfor use only with small toy systems due to exponential scaling of\nsummations over state space with number of units. 
As well as a single\nthreaded sequential implementation a parallel implementation which can\nbe distributed over multiple compute units using OpenMP in a shared memory\narchitecture is provided.\n"; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_kp_s_Number_of_threads_must_be_0; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static 
PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_all_in_place; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_biases; static PyObject *__pyx_n_s_bmtools_exact_moments; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_calculate_moments_parallel; static PyObject *__pyx_n_s_calculate_moments_sequential; static PyObject *__pyx_n_s_calculate_probs_parallel; static PyObject *__pyx_n_s_class; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_d; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_first_mom; static PyObject *__pyx_n_s_first_moms; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_force; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_kp_s_home_matt_Projects_boltzmann_ma; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_intervals; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_j; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_norm_const; static PyObject *__pyx_n_s_norm_consts; static PyObject *__pyx_n_s_num_states; static PyObject *__pyx_n_s_num_threads; static PyObject *__pyx_n_s_num_units; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_pack; static PyObject 
*__pyx_n_s_prob; static PyObject *__pyx_n_s_probs; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_second_mom; static PyObject *__pyx_n_s_second_moms; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_state; static PyObject *__pyx_n_s_state_index; static PyObject *__pyx_n_s_states; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_t; static PyObject *__pyx_n_s_test; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_weights; static PyObject *__pyx_pf_7bmtools_5exact_7moments_calculate_moments_sequential(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_weights, __Pyx_memviewslice __pyx_v_biases, int __pyx_v_force); /* proto */ static PyObject *__pyx_pf_7bmtools_5exact_7moments_2calculate_moments_parallel(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_weights, __Pyx_memviewslice __pyx_v_biases, int __pyx_v_force, int __pyx_v_num_threads, __Pyx_memviewslice __pyx_v_norm_consts, __Pyx_memviewslice __pyx_v_first_moms, __Pyx_memviewslice __pyx_v_second_moms); /* proto */ static PyObject *__pyx_pf_7bmtools_5exact_7moments_4calculate_probs_parallel(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_weights, __Pyx_memviewslice __pyx_v_biases, int __pyx_v_force, int __pyx_v_num_threads); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject 
*__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject 
*__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_neg_1; static __Pyx_memviewslice __pyx_k_; static __Pyx_memviewslice __pyx_k__2; static __Pyx_memviewslice __pyx_k__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__15; static PyObject *__pyx_slice__16; static PyObject *__pyx_slice__17; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__18; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__26; static PyObject 
*__pyx_tuple__27; static PyObject *__pyx_tuple__28; static PyObject *__pyx_tuple__29; static PyObject *__pyx_codeobj__20; static PyObject *__pyx_codeobj__22; static PyObject *__pyx_codeobj__24; /* "bmtools/exact/moments.pyx":24 * double log(double x) nogil * * def calculate_moments_sequential(double[:, :] weights, double[:] biases, # <<<<<<<<<<<<<< * bint force=False): * """Calculate Boltzmann machine distribution moments. */ /* Python wrapper */ static PyObject *__pyx_pw_7bmtools_5exact_7moments_1calculate_moments_sequential(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_7bmtools_5exact_7moments_calculate_moments_sequential[] = "calculate_moments_sequential(__Pyx_memviewslice weights, __Pyx_memviewslice biases, bool force=False)\nCalculate Boltzmann machine distribution moments.\n\n Calculates normalisation constant (~zeroth moment), first and second\n moments of Boltzmann machine invariant distribution specified by the\n provided weight and bias parameters. Moment calculations are done\n using a single thread iterating across all possible state configurations\n sequentially.\n\n Parameters\n ----------\n weights : double[:, :]\n Matrix of weight parameters. 
Should be symmetric and zero-diagonal.\n biases : double[:]\n Vector of bias parameters.\n force : bool (bint)\n Flag to override forced exit when number of units is more than 20\n due to large size of state space.\n\n Returns\n -------\n norm_const : double\n Sum of unnormalised probability terms across all states.\n first_mom : double[:]\n Expectation of state vector with respect to distribution.\n second_mom : double[:, :]\n Expectation of outer product of state vectors with respect to\n distribution.\n "; static PyMethodDef __pyx_mdef_7bmtools_5exact_7moments_1calculate_moments_sequential = {"calculate_moments_sequential", (PyCFunction)__pyx_pw_7bmtools_5exact_7moments_1calculate_moments_sequential, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7bmtools_5exact_7moments_calculate_moments_sequential}; static PyObject *__pyx_pw_7bmtools_5exact_7moments_1calculate_moments_sequential(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_weights = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_biases = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_force; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("calculate_moments_sequential (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_weights,&__pyx_n_s_biases,&__pyx_n_s_force,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_weights)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_biases)) != 0)) kw_args--; else { 
__Pyx_RaiseArgtupleInvalid("calculate_moments_sequential", 0, 2, 3, 1); __PYX_ERR(0, 24, __pyx_L3_error) } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_force); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "calculate_moments_sequential") < 0)) __PYX_ERR(0, 24, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_weights = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0]); if (unlikely(!__pyx_v_weights.memview)) __PYX_ERR(0, 24, __pyx_L3_error) __pyx_v_biases = __Pyx_PyObject_to_MemoryviewSlice_ds_double(values[1]); if (unlikely(!__pyx_v_biases.memview)) __PYX_ERR(0, 24, __pyx_L3_error) if (values[2]) { __pyx_v_force = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_force == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25, __pyx_L3_error) } else { /* "bmtools/exact/moments.pyx":25 * * def calculate_moments_sequential(double[:, :] weights, double[:] biases, * bint force=False): # <<<<<<<<<<<<<< * """Calculate Boltzmann machine distribution moments. 
* */ __pyx_v_force = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("calculate_moments_sequential", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 24, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("bmtools.exact.moments.calculate_moments_sequential", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_7bmtools_5exact_7moments_calculate_moments_sequential(__pyx_self, __pyx_v_weights, __pyx_v_biases, __pyx_v_force); /* "bmtools/exact/moments.pyx":24 * double log(double x) nogil * * def calculate_moments_sequential(double[:, :] weights, double[:] biases, # <<<<<<<<<<<<<< * bint force=False): * """Calculate Boltzmann machine distribution moments. */ /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_7bmtools_5exact_7moments_calculate_moments_sequential(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_weights, __Pyx_memviewslice __pyx_v_biases, int __pyx_v_force) { int __pyx_v_num_units; CYTHON_UNUSED double __pyx_v_prob; double __pyx_v_norm_const; long __pyx_v_num_states; __Pyx_memviewslice __pyx_v_state = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_first_mom = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_second_mom = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_t_6 = NULL; __Pyx_memviewslice __pyx_t_7 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_RefNannySetupContext("calculate_moments_sequential", 0); /* "bmtools/exact/moments.pyx":56 * cdef long state_index * cdef int i, j * cdef int num_units = weights.shape[0] # <<<<<<<<<<<<<< * cdef double prob = 0. 
* cdef double norm_const = 0. */ __pyx_v_num_units = (__pyx_v_weights.shape[0]); /* "bmtools/exact/moments.pyx":57 * cdef int i, j * cdef int num_units = weights.shape[0] * cdef double prob = 0. # <<<<<<<<<<<<<< * cdef double norm_const = 0. * cdef long num_states = 2**num_units */ __pyx_v_prob = 0.; /* "bmtools/exact/moments.pyx":58 * cdef int num_units = weights.shape[0] * cdef double prob = 0. * cdef double norm_const = 0. # <<<<<<<<<<<<<< * cdef long num_states = 2**num_units * check_state_space_size(num_units, force) */ __pyx_v_norm_const = 0.; /* "bmtools/exact/moments.pyx":59 * cdef double prob = 0. * cdef double norm_const = 0. * cdef long num_states = 2**num_units # <<<<<<<<<<<<<< * check_state_space_size(num_units, force) * cdef state_t[:] state = array( */ __pyx_v_num_states = __Pyx_pow_long(2, ((long)__pyx_v_num_units)); /* "bmtools/exact/moments.pyx":60 * cdef double norm_const = 0. * cdef long num_states = 2**num_units * check_state_space_size(num_units, force) # <<<<<<<<<<<<<< * cdef state_t[:] state = array( * shape=(num_units,), itemsize=sizeof(state_t), format=state_t_code) */ __pyx_f_7bmtools_5exact_7helpers_check_state_space_size(__pyx_v_num_units, __pyx_v_force, 0); /* "bmtools/exact/moments.pyx":62 * check_state_space_size(num_units, force) * cdef state_t[:] state = array( * shape=(num_units,), itemsize=sizeof(state_t), format=state_t_code) # <<<<<<<<<<<<<< * cdef double[:] first_mom = array( * shape=(num_units,), itemsize=sizeof(double), format='d') */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 62, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_num_units); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 62, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 62, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_shape, 
__pyx_t_3) < 0) __PYX_ERR(0, 62, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyInt_FromSize_t((sizeof(__pyx_t_7bmtools_5exact_7helpers_state_t))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 62, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_itemsize, __pyx_t_3) < 0) __PYX_ERR(0, 62, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_7bmtools_5exact_7helpers_state_t_code); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 62, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_format, __pyx_t_3) < 0) __PYX_ERR(0, 62, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "bmtools/exact/moments.pyx":61 * cdef long num_states = 2**num_units * check_state_space_size(num_units, force) * cdef state_t[:] state = array( # <<<<<<<<<<<<<< * shape=(num_units,), itemsize=sizeof(state_t), format=state_t_code) * cdef double[:] first_mom = array( */ __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_empty_tuple, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 61, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_to_MemoryviewSlice_ds_nn___pyx_t_7bmtools_5exact_7helpers_state_t(__pyx_t_3); if (unlikely(!__pyx_t_4.memview)) __PYX_ERR(0, 61, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_state = __pyx_t_4; __pyx_t_4.memview = NULL; __pyx_t_4.data = NULL; /* "bmtools/exact/moments.pyx":64 * shape=(num_units,), itemsize=sizeof(state_t), format=state_t_code) * cdef double[:] first_mom = array( * shape=(num_units,), itemsize=sizeof(double), format='d') # <<<<<<<<<<<<<< * cdef double[:, :] second_mom = array( * shape=(num_units, num_units), itemsize=sizeof(double), format='d') */ __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 64, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_num_units); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 64, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 64, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_shape, __pyx_t_2) < 0) __PYX_ERR(0, 64, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_FromSize_t((sizeof(double))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 64, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_itemsize, __pyx_t_2) < 0) __PYX_ERR(0, 64, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_format, __pyx_n_s_d) < 0) __PYX_ERR(0, 64, __pyx_L1_error) /* "bmtools/exact/moments.pyx":63 * cdef state_t[:] state = array( * shape=(num_units,), itemsize=sizeof(state_t), format=state_t_code) * cdef double[:] first_mom = array( # <<<<<<<<<<<<<< * shape=(num_units,), itemsize=sizeof(double), format='d') * cdef double[:, :] second_mom = array( */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_empty_tuple, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 63, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_2); if (unlikely(!__pyx_t_5.memview)) __PYX_ERR(0, 63, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_first_mom = __pyx_t_5; __pyx_t_5.memview = NULL; __pyx_t_5.data = NULL; /* "bmtools/exact/moments.pyx":66 * shape=(num_units,), itemsize=sizeof(double), format='d') * cdef double[:, :] second_mom = array( * shape=(num_units, num_units), itemsize=sizeof(double), format='d') # <<<<<<<<<<<<<< * accum_moments_for_state_range(weights, biases, state, &norm_const, * first_mom, second_mom, 0, num_states) */ __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 66, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); 
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_num_units); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 66, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_num_units); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 66, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 66, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_1); __pyx_t_3 = 0; __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_shape, __pyx_t_6) < 0) __PYX_ERR(0, 66, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyInt_FromSize_t((sizeof(double))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 66, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_itemsize, __pyx_t_6) < 0) __PYX_ERR(0, 66, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_format, __pyx_n_s_d) < 0) __PYX_ERR(0, 66, __pyx_L1_error) /* "bmtools/exact/moments.pyx":65 * cdef double[:] first_mom = array( * shape=(num_units,), itemsize=sizeof(double), format='d') * cdef double[:, :] second_mom = array( # <<<<<<<<<<<<<< * shape=(num_units, num_units), itemsize=sizeof(double), format='d') * accum_moments_for_state_range(weights, biases, state, &norm_const, */ __pyx_t_6 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 65, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(__pyx_t_6); if (unlikely(!__pyx_t_7.memview)) __PYX_ERR(0, 65, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_v_second_mom = __pyx_t_7; __pyx_t_7.memview = NULL; __pyx_t_7.data = NULL; /* "bmtools/exact/moments.pyx":67 * cdef double[:, :] second_mom = array( * shape=(num_units, num_units), 
itemsize=sizeof(double), format='d') * accum_moments_for_state_range(weights, biases, state, &norm_const, # <<<<<<<<<<<<<< * first_mom, second_mom, 0, num_states) * normalise_first_moment(first_mom, norm_const) */ __pyx_f_7bmtools_5exact_7moments_accum_moments_for_state_range(__pyx_v_weights, __pyx_v_biases, __pyx_v_state, (&__pyx_v_norm_const), __pyx_v_first_mom, __pyx_v_second_mom, 0, __pyx_v_num_states); /* "bmtools/exact/moments.pyx":69 * accum_moments_for_state_range(weights, biases, state, &norm_const, * first_mom, second_mom, 0, num_states) * normalise_first_moment(first_mom, norm_const) # <<<<<<<<<<<<<< * normalise_and_reflect_second_moment(second_mom, norm_const) * return norm_const, first_mom, second_mom */ __pyx_f_7bmtools_5exact_7moments_normalise_first_moment(__pyx_v_first_mom, __pyx_v_norm_const); /* "bmtools/exact/moments.pyx":70 * first_mom, second_mom, 0, num_states) * normalise_first_moment(first_mom, norm_const) * normalise_and_reflect_second_moment(second_mom, norm_const) # <<<<<<<<<<<<<< * return norm_const, first_mom, second_mom * */ __pyx_f_7bmtools_5exact_7moments_normalise_and_reflect_second_moment(__pyx_v_second_mom, __pyx_v_norm_const); /* "bmtools/exact/moments.pyx":71 * normalise_first_moment(first_mom, norm_const) * normalise_and_reflect_second_moment(second_mom, norm_const) * return norm_const, first_mom, second_mom # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_6 = PyFloat_FromDouble(__pyx_v_norm_const); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_first_mom, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_second_mom, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_1); __pyx_t_6 = 0; __pyx_t_2 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "bmtools/exact/moments.pyx":24 * double log(double x) nogil * * def calculate_moments_sequential(double[:, :] weights, double[:] biases, # <<<<<<<<<<<<<< * bint force=False): * """Calculate Boltzmann machine distribution moments. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __PYX_XDEC_MEMVIEW(&__pyx_t_4, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_5, 1); __Pyx_XDECREF(__pyx_t_6); __PYX_XDEC_MEMVIEW(&__pyx_t_7, 1); __Pyx_AddTraceback("bmtools.exact.moments.calculate_moments_sequential", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_state, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_first_mom, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_second_mom, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_weights, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_biases, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "bmtools/exact/moments.pyx":74 * * * def calculate_moments_parallel(double[:, :] weights, double[:] biases, # <<<<<<<<<<<<<< * bint force=False, int num_threads=2, * double[:] norm_consts=None, */ /* Python wrapper */ static PyObject *__pyx_pw_7bmtools_5exact_7moments_3calculate_moments_parallel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_7bmtools_5exact_7moments_2calculate_moments_parallel[] = "calculate_moments_parallel(__Pyx_memviewslice weights, __Pyx_memviewslice biases, bool force=False, int num_threads=2, 
__Pyx_memviewslice norm_consts=None, __Pyx_memviewslice first_moms=None, __Pyx_memviewslice second_moms=None)\nCalculate Boltzmann machine distribution moments.\n\n Calculates normalisation constant (~zeroth moment), first and second\n moments of Boltzmann machine invariant distribution specified by the\n provided weight and bias parameters. Moment calculations are done in\n parallel using a specified number of thread each iterating across an\n equal partition of the state space.\n\n Parameters\n ----------\n weights : double[:, :]\n Matrix of weight parameters. Should be symmetric and zero-diagonal.\n biases : double[:]\n Vector of bias parameters.\n force : bool (bint)\n Flag to override forced exit when number of units is more than 20\n due to large size of state space.\n num_threads : int (default=2)\n Number of parallel threads to use.\n norm_consts : double[:], optional\n Allocated array to use in parallel normalisation constant\n calculation. Should have shape (num_threads,). Final value accumulated\n over all threads will be written to first entry. If not\n specified (or either of first_moms or second_moms not specidied) new\n array allocated and returned.\n first_moms : double[:, :], optional\n Allocated array to use in parallel first moment calculation. Should\n have shape (num_threads, num_units). Final value accumulated\n over all threads will be written to first element. If not specified\n (or either of norm_consts or second_moms not specified) new array\n allocated and first entry returned.\n second_moms : double[:, :, :], optional\n Allocated array to use in parallel second moment calculation. Should\n have shape (num_threads, num_units, num_units). Final values\n accum""ulated over all threads will be written to first entry. 
If not\n specified (or either of norm_consts or first_moms not specified) new\n array allocated and first entry returned.\n\n Returns\n -------\n **If no arrays specified for in-place calculation, otherwise no return**\n norm_const : double\n Sum of unnormalised probability terms across all states.\n first_mom : double[:]\n Expectation of state vector with respect to distribution.\n second_mom : double[:, :]\n Expectation of outer product of state vectors with respect to\n distribution.\n "; static PyMethodDef __pyx_mdef_7bmtools_5exact_7moments_3calculate_moments_parallel = {"calculate_moments_parallel", (PyCFunction)__pyx_pw_7bmtools_5exact_7moments_3calculate_moments_parallel, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7bmtools_5exact_7moments_2calculate_moments_parallel}; static PyObject *__pyx_pw_7bmtools_5exact_7moments_3calculate_moments_parallel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_weights = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_biases = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_force; int __pyx_v_num_threads; __Pyx_memviewslice __pyx_v_norm_consts = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_first_moms = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_second_moms = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("calculate_moments_parallel (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_weights,&__pyx_n_s_biases,&__pyx_n_s_force,&__pyx_n_s_num_threads,&__pyx_n_s_norm_consts,&__pyx_n_s_first_moms,&__pyx_n_s_second_moms,0}; PyObject* values[7] = {0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = 
PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_weights)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_biases)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("calculate_moments_parallel", 0, 2, 7, 1); __PYX_ERR(0, 74, __pyx_L3_error) } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_force); if (value) { values[2] = value; kw_args--; } } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_num_threads); if (value) { values[3] = value; kw_args--; } } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_norm_consts); if (value) { values[4] = value; kw_args--; } } case 5: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_first_moms); if (value) { values[5] = value; kw_args--; } } case 6: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_second_moms); if (value) { values[6] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "calculate_moments_parallel") < 0)) __PYX_ERR(0, 74, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto 
__pyx_L5_argtuple_error; } } __pyx_v_weights = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0]); if (unlikely(!__pyx_v_weights.memview)) __PYX_ERR(0, 74, __pyx_L3_error) __pyx_v_biases = __Pyx_PyObject_to_MemoryviewSlice_ds_double(values[1]); if (unlikely(!__pyx_v_biases.memview)) __PYX_ERR(0, 74, __pyx_L3_error) if (values[2]) { __pyx_v_force = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_force == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 75, __pyx_L3_error) } else { /* "bmtools/exact/moments.pyx":75 * * def calculate_moments_parallel(double[:, :] weights, double[:] biases, * bint force=False, int num_threads=2, # <<<<<<<<<<<<<< * double[:] norm_consts=None, * double[:, :] first_moms=None, */ __pyx_v_force = ((int)0); } if (values[3]) { __pyx_v_num_threads = __Pyx_PyInt_As_int(values[3]); if (unlikely((__pyx_v_num_threads == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 75, __pyx_L3_error) } else { __pyx_v_num_threads = ((int)2); } if (values[4]) { __pyx_v_norm_consts = __Pyx_PyObject_to_MemoryviewSlice_ds_double(values[4]); if (unlikely(!__pyx_v_norm_consts.memview)) __PYX_ERR(0, 76, __pyx_L3_error) } else { __pyx_v_norm_consts = __pyx_k_; __PYX_INC_MEMVIEW(&__pyx_v_norm_consts, 1); } if (values[5]) { __pyx_v_first_moms = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[5]); if (unlikely(!__pyx_v_first_moms.memview)) __PYX_ERR(0, 77, __pyx_L3_error) } else { __pyx_v_first_moms = __pyx_k__2; __PYX_INC_MEMVIEW(&__pyx_v_first_moms, 1); } if (values[6]) { __pyx_v_second_moms = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(values[6]); if (unlikely(!__pyx_v_second_moms.memview)) __PYX_ERR(0, 78, __pyx_L3_error) } else { __pyx_v_second_moms = __pyx_k__3; __PYX_INC_MEMVIEW(&__pyx_v_second_moms, 1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("calculate_moments_parallel", 0, 2, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 74, __pyx_L3_error) __pyx_L3_error:; 
__Pyx_AddTraceback("bmtools.exact.moments.calculate_moments_parallel", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_7bmtools_5exact_7moments_2calculate_moments_parallel(__pyx_self, __pyx_v_weights, __pyx_v_biases, __pyx_v_force, __pyx_v_num_threads, __pyx_v_norm_consts, __pyx_v_first_moms, __pyx_v_second_moms); /* "bmtools/exact/moments.pyx":74 * * * def calculate_moments_parallel(double[:, :] weights, double[:] biases, # <<<<<<<<<<<<<< * bint force=False, int num_threads=2, * double[:] norm_consts=None, */ /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_7bmtools_5exact_7moments_2calculate_moments_parallel(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_weights, __Pyx_memviewslice __pyx_v_biases, int __pyx_v_force, int __pyx_v_num_threads, __Pyx_memviewslice __pyx_v_norm_consts, __Pyx_memviewslice __pyx_v_first_moms, __Pyx_memviewslice __pyx_v_second_moms) { int __pyx_v_t; int __pyx_v_num_units; CYTHON_UNUSED double __pyx_v_prob; long __pyx_v_num_states; __Pyx_memviewslice __pyx_v_states = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_all_in_place; __Pyx_memviewslice __pyx_v_intervals = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_7; __Pyx_memviewslice __pyx_t_8 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_9 = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_t_10 = NULL; __Pyx_memviewslice __pyx_t_11 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_12 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_13; int __pyx_t_14; int __pyx_t_15; int __pyx_t_16; Py_ssize_t __pyx_t_17; __Pyx_memviewslice __pyx_t_18 = { 0, 0, { 0 }, { 0 }, { 0 } }; Py_ssize_t 
__pyx_t_19; Py_ssize_t __pyx_t_20; Py_ssize_t __pyx_t_21; Py_ssize_t __pyx_t_22; Py_ssize_t __pyx_t_23; Py_ssize_t __pyx_t_24; Py_ssize_t __pyx_t_25; Py_ssize_t __pyx_t_26; Py_ssize_t __pyx_t_27; Py_ssize_t __pyx_t_28; __Pyx_RefNannySetupContext("calculate_moments_parallel", 0); /* "bmtools/exact/moments.pyx":129 * """ * cdef int t * cdef int num_units = weights.shape[0] # <<<<<<<<<<<<<< * cdef double prob = 0. * cdef long num_states = 2**num_units */ __pyx_v_num_units = (__pyx_v_weights.shape[0]); /* "bmtools/exact/moments.pyx":130 * cdef int t * cdef int num_units = weights.shape[0] * cdef double prob = 0. # <<<<<<<<<<<<<< * cdef long num_states = 2**num_units * if num_threads <= 0: */ __pyx_v_prob = 0.; /* "bmtools/exact/moments.pyx":131 * cdef int num_units = weights.shape[0] * cdef double prob = 0. * cdef long num_states = 2**num_units # <<<<<<<<<<<<<< * if num_threads <= 0: * raise ValueError('Number of threads must be > 0') */ __pyx_v_num_states = __Pyx_pow_long(2, ((long)__pyx_v_num_units)); /* "bmtools/exact/moments.pyx":132 * cdef double prob = 0. * cdef long num_states = 2**num_units * if num_threads <= 0: # <<<<<<<<<<<<<< * raise ValueError('Number of threads must be > 0') * check_state_space_size(num_units, force) */ __pyx_t_1 = ((__pyx_v_num_threads <= 0) != 0); if (__pyx_t_1) { /* "bmtools/exact/moments.pyx":133 * cdef long num_states = 2**num_units * if num_threads <= 0: * raise ValueError('Number of threads must be > 0') # <<<<<<<<<<<<<< * check_state_space_size(num_units, force) * cdef state_t[:, :] states = array( */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 133, __pyx_L1_error) /* "bmtools/exact/moments.pyx":132 * cdef double prob = 0. 
* cdef long num_states = 2**num_units * if num_threads <= 0: # <<<<<<<<<<<<<< * raise ValueError('Number of threads must be > 0') * check_state_space_size(num_units, force) */ } /* "bmtools/exact/moments.pyx":134 * if num_threads <= 0: * raise ValueError('Number of threads must be > 0') * check_state_space_size(num_units, force) # <<<<<<<<<<<<<< * cdef state_t[:, :] states = array( * shape=(num_threads, num_units), itemsize=sizeof(state_t), */ __pyx_f_7bmtools_5exact_7helpers_check_state_space_size(__pyx_v_num_units, __pyx_v_force, 0); /* "bmtools/exact/moments.pyx":136 * check_state_space_size(num_units, force) * cdef state_t[:, :] states = array( * shape=(num_threads, num_units), itemsize=sizeof(state_t), # <<<<<<<<<<<<<< * format=state_t_code) * # check if any arrays for in place updates not specified and if so */ __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_num_threads); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_num_units); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_shape, __pyx_t_5) < 0) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyInt_FromSize_t((sizeof(__pyx_t_7bmtools_5exact_7helpers_state_t))); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_itemsize, __pyx_t_5) < 0) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "bmtools/exact/moments.pyx":137 * cdef 
state_t[:, :] states = array( * shape=(num_threads, num_units), itemsize=sizeof(state_t), * format=state_t_code) # <<<<<<<<<<<<<< * # check if any arrays for in place updates not specified and if so * # initialise */ __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_7bmtools_5exact_7helpers_state_t_code); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_format, __pyx_t_5) < 0) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "bmtools/exact/moments.pyx":135 * raise ValueError('Number of threads must be > 0') * check_state_space_size(num_units, force) * cdef state_t[:, :] states = array( # <<<<<<<<<<<<<< * shape=(num_threads, num_units), itemsize=sizeof(state_t), * format=state_t_code) */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 135, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_dsds_nn___pyx_t_7bmtools_5exact_7helpers_state_t(__pyx_t_5); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 135, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_states = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "bmtools/exact/moments.pyx":140 * # check if any arrays for in place updates not specified and if so * # initialise * cdef bint all_in_place = True # <<<<<<<<<<<<<< * if norm_consts is None or first_moms is None or second_moms is None: * all_in_place = False */ __pyx_v_all_in_place = 1; /* "bmtools/exact/moments.pyx":141 * # initialise * cdef bint all_in_place = True * if norm_consts is None or first_moms is None or second_moms is None: # <<<<<<<<<<<<<< * all_in_place = False * norm_consts = array(shape=(num_threads,), */ __pyx_t_7 = ((((PyObject *) __pyx_v_norm_consts.memview) == Py_None) != 0); if (!__pyx_t_7) { } else { __pyx_t_1 = __pyx_t_7; goto __pyx_L5_bool_binop_done; } 
__pyx_t_7 = ((((PyObject *) __pyx_v_first_moms.memview) == Py_None) != 0); if (!__pyx_t_7) { } else { __pyx_t_1 = __pyx_t_7; goto __pyx_L5_bool_binop_done; } __pyx_t_7 = ((((PyObject *) __pyx_v_second_moms.memview) == Py_None) != 0); __pyx_t_1 = __pyx_t_7; __pyx_L5_bool_binop_done:; if (__pyx_t_1) { /* "bmtools/exact/moments.pyx":142 * cdef bint all_in_place = True * if norm_consts is None or first_moms is None or second_moms is None: * all_in_place = False # <<<<<<<<<<<<<< * norm_consts = array(shape=(num_threads,), * itemsize=sizeof(double), format='d') */ __pyx_v_all_in_place = 0; /* "bmtools/exact/moments.pyx":143 * if norm_consts is None or first_moms is None or second_moms is None: * all_in_place = False * norm_consts = array(shape=(num_threads,), # <<<<<<<<<<<<<< * itemsize=sizeof(double), format='d') * if first_moms is None: */ __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_num_threads); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_shape, __pyx_t_4) < 0) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "bmtools/exact/moments.pyx":144 * all_in_place = False * norm_consts = array(shape=(num_threads,), * itemsize=sizeof(double), format='d') # <<<<<<<<<<<<<< * if first_moms is None: * all_in_place = False */ __pyx_t_4 = __Pyx_PyInt_FromSize_t((sizeof(double))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 144, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_itemsize, __pyx_t_4) < 0) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_format, __pyx_n_s_d) < 0) __PYX_ERR(0, 
143, __pyx_L1_error) /* "bmtools/exact/moments.pyx":143 * if norm_consts is None or first_moms is None or second_moms is None: * all_in_place = False * norm_consts = array(shape=(num_threads,), # <<<<<<<<<<<<<< * itemsize=sizeof(double), format='d') * if first_moms is None: */ __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_empty_tuple, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_4); if (unlikely(!__pyx_t_8.memview)) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_XDEC_MEMVIEW(&__pyx_v_norm_consts, 1); __pyx_v_norm_consts = __pyx_t_8; __pyx_t_8.memview = NULL; __pyx_t_8.data = NULL; /* "bmtools/exact/moments.pyx":141 * # initialise * cdef bint all_in_place = True * if norm_consts is None or first_moms is None or second_moms is None: # <<<<<<<<<<<<<< * all_in_place = False * norm_consts = array(shape=(num_threads,), */ } /* "bmtools/exact/moments.pyx":145 * norm_consts = array(shape=(num_threads,), * itemsize=sizeof(double), format='d') * if first_moms is None: # <<<<<<<<<<<<<< * all_in_place = False * first_moms = array(shape=(num_threads, num_units), */ __pyx_t_1 = ((((PyObject *) __pyx_v_first_moms.memview) == Py_None) != 0); if (__pyx_t_1) { /* "bmtools/exact/moments.pyx":146 * itemsize=sizeof(double), format='d') * if first_moms is None: * all_in_place = False # <<<<<<<<<<<<<< * first_moms = array(shape=(num_threads, num_units), * itemsize=sizeof(double), format='d') */ __pyx_v_all_in_place = 0; /* "bmtools/exact/moments.pyx":147 * if first_moms is None: * all_in_place = False * first_moms = array(shape=(num_threads, num_units), # <<<<<<<<<<<<<< * itemsize=sizeof(double), format='d') * if second_moms is None: */ __pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 147, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = 
__Pyx_PyInt_From_int(__pyx_v_num_threads); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 147, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_num_units); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 147, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 147, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __pyx_t_5 = 0; __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_shape, __pyx_t_3) < 0) __PYX_ERR(0, 147, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "bmtools/exact/moments.pyx":148 * all_in_place = False * first_moms = array(shape=(num_threads, num_units), * itemsize=sizeof(double), format='d') # <<<<<<<<<<<<<< * if second_moms is None: * all_in_place = False */ __pyx_t_3 = __Pyx_PyInt_FromSize_t((sizeof(double))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_itemsize, __pyx_t_3) < 0) __PYX_ERR(0, 147, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_format, __pyx_n_s_d) < 0) __PYX_ERR(0, 147, __pyx_L1_error) /* "bmtools/exact/moments.pyx":147 * if first_moms is None: * all_in_place = False * first_moms = array(shape=(num_threads, num_units), # <<<<<<<<<<<<<< * itemsize=sizeof(double), format='d') * if second_moms is None: */ __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_empty_tuple, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 147, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_9 = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(__pyx_t_3); if (unlikely(!__pyx_t_9.memview)) __PYX_ERR(0, 147, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_XDEC_MEMVIEW(&__pyx_v_first_moms, 1); __pyx_v_first_moms = __pyx_t_9; __pyx_t_9.memview = 
NULL; __pyx_t_9.data = NULL; /* "bmtools/exact/moments.pyx":145 * norm_consts = array(shape=(num_threads,), * itemsize=sizeof(double), format='d') * if first_moms is None: # <<<<<<<<<<<<<< * all_in_place = False * first_moms = array(shape=(num_threads, num_units), */ } /* "bmtools/exact/moments.pyx":149 * first_moms = array(shape=(num_threads, num_units), * itemsize=sizeof(double), format='d') * if second_moms is None: # <<<<<<<<<<<<<< * all_in_place = False * second_moms = array(shape=(num_threads, num_units, num_units), */ __pyx_t_1 = ((((PyObject *) __pyx_v_second_moms.memview) == Py_None) != 0); if (__pyx_t_1) { /* "bmtools/exact/moments.pyx":150 * itemsize=sizeof(double), format='d') * if second_moms is None: * all_in_place = False # <<<<<<<<<<<<<< * second_moms = array(shape=(num_threads, num_units, num_units), * itemsize=sizeof(double), format='d') */ __pyx_v_all_in_place = 0; /* "bmtools/exact/moments.pyx":151 * if second_moms is None: * all_in_place = False * second_moms = array(shape=(num_threads, num_units, num_units), # <<<<<<<<<<<<<< * itemsize=sizeof(double), format='d') * # partition state space in to equal sized sections to allocate to */ __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_num_threads); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_num_units); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_num_units); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_10 = PyTuple_New(3); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_5); 
PyTuple_SET_ITEM(__pyx_t_10, 2, __pyx_t_5); __pyx_t_4 = 0; __pyx_t_2 = 0; __pyx_t_5 = 0; if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_shape, __pyx_t_10) < 0) __PYX_ERR(0, 151, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "bmtools/exact/moments.pyx":152 * all_in_place = False * second_moms = array(shape=(num_threads, num_units, num_units), * itemsize=sizeof(double), format='d') # <<<<<<<<<<<<<< * # partition state space in to equal sized sections to allocate to * # different parallel threads */ __pyx_t_10 = __Pyx_PyInt_FromSize_t((sizeof(double))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 152, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_itemsize, __pyx_t_10) < 0) __PYX_ERR(0, 151, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_format, __pyx_n_s_d) < 0) __PYX_ERR(0, 151, __pyx_L1_error) /* "bmtools/exact/moments.pyx":151 * if second_moms is None: * all_in_place = False * second_moms = array(shape=(num_threads, num_units, num_units), # <<<<<<<<<<<<<< * itemsize=sizeof(double), format='d') * # partition state space in to equal sized sections to allocate to */ __pyx_t_10 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_empty_tuple, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_11 = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(__pyx_t_10); if (unlikely(!__pyx_t_11.memview)) __PYX_ERR(0, 151, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_XDEC_MEMVIEW(&__pyx_v_second_moms, 1); __pyx_v_second_moms = __pyx_t_11; __pyx_t_11.memview = NULL; __pyx_t_11.data = NULL; /* "bmtools/exact/moments.pyx":149 * first_moms = array(shape=(num_threads, num_units), * itemsize=sizeof(double), format='d') * if second_moms is None: # <<<<<<<<<<<<<< * all_in_place = False * second_moms = array(shape=(num_threads, num_units, num_units), */ } /* 
"bmtools/exact/moments.pyx":155 * # partition state space in to equal sized sections to allocate to * # different parallel threads * cdef long[:] intervals = partition_state_space(num_states, num_threads) # <<<<<<<<<<<<<< * # parallel loop over partitions of state space, with each thread * # accumulating moments for its assigned states into thread-specific */ __pyx_t_12 = __pyx_f_7bmtools_5exact_7helpers_partition_state_space(__pyx_v_num_states, __pyx_v_num_threads); if (unlikely(!__pyx_t_12.memview)) __PYX_ERR(0, 155, __pyx_L1_error) __pyx_v_intervals = __pyx_t_12; __pyx_t_12.memview = NULL; __pyx_t_12.data = NULL; /* "bmtools/exact/moments.pyx":159 * # accumulating moments for its assigned states into thread-specific * # arrays * for t in prange(num_threads, nogil=True, schedule='static', chunksize=1, # <<<<<<<<<<<<<< * num_threads=num_threads): * norm_consts[t] = 0. */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { __pyx_t_13 = __pyx_v_num_threads; if (1 == 0) abort(); { int __pyx_parallel_temp0 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; __pyx_t_16 = 1; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_15 = (__pyx_t_13 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_15 > 0) { #ifdef _OPENMP #pragma omp parallel num_threads(__pyx_v_num_threads) private(__pyx_t_17, __pyx_t_19, __pyx_t_20, __pyx_t_21) firstprivate(__pyx_t_18, __pyx_t_8, __pyx_t_9) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef 
WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_t) lastprivate(__pyx_v_t) schedule(static, __pyx_t_16) #endif /* _OPENMP */ for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_15; __pyx_t_14++){ if (__pyx_parallel_why < 2) { __pyx_v_t = (int)(0 + 1 * __pyx_t_14); /* "bmtools/exact/moments.pyx":161 * for t in prange(num_threads, nogil=True, schedule='static', chunksize=1, * num_threads=num_threads): * norm_consts[t] = 0. # <<<<<<<<<<<<<< * accum_moments_for_state_range( * weights, biases, states[t], &norm_consts[t], first_moms[t], */ __pyx_t_17 = __pyx_v_t; *((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_17 * __pyx_v_norm_consts.strides[0]) )) = 0.; /* "bmtools/exact/moments.pyx":163 * norm_consts[t] = 0. * accum_moments_for_state_range( * weights, biases, states[t], &norm_consts[t], first_moms[t], # <<<<<<<<<<<<<< * second_moms[t], intervals[t], intervals[t+1]) * # accumulate normalisation constant terms calculated by each individual */ __pyx_t_18.data = __pyx_v_states.data; __pyx_t_18.memview = __pyx_v_states.memview; __PYX_INC_MEMVIEW(&__pyx_t_18, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_t; Py_ssize_t __pyx_tmp_shape = __pyx_v_states.shape[0]; Py_ssize_t __pyx_tmp_stride = __pyx_v_states.strides[0]; if (0 && (__pyx_tmp_idx < 0)) __pyx_tmp_idx += __pyx_tmp_shape; if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif __PYX_ERR(0, 163, __pyx_L15_error) } __pyx_t_18.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_18.shape[0] = __pyx_v_states.shape[1]; __pyx_t_18.strides[0] = __pyx_v_states.strides[1]; __pyx_t_18.suboffsets[0] = -1; __pyx_t_19 = __pyx_v_t; __pyx_t_8.data = __pyx_v_first_moms.data; 
__pyx_t_8.memview = __pyx_v_first_moms.memview; __PYX_INC_MEMVIEW(&__pyx_t_8, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_t; Py_ssize_t __pyx_tmp_shape = __pyx_v_first_moms.shape[0]; Py_ssize_t __pyx_tmp_stride = __pyx_v_first_moms.strides[0]; if (0 && (__pyx_tmp_idx < 0)) __pyx_tmp_idx += __pyx_tmp_shape; if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif __PYX_ERR(0, 163, __pyx_L15_error) } __pyx_t_8.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_8.shape[0] = __pyx_v_first_moms.shape[1]; __pyx_t_8.strides[0] = __pyx_v_first_moms.strides[1]; __pyx_t_8.suboffsets[0] = -1; __pyx_t_9.data = __pyx_v_second_moms.data; /* "bmtools/exact/moments.pyx":164 * accum_moments_for_state_range( * weights, biases, states[t], &norm_consts[t], first_moms[t], * second_moms[t], intervals[t], intervals[t+1]) # <<<<<<<<<<<<<< * # accumulate normalisation constant terms calculated by each individual * # thread to get overall value */ __pyx_t_9.memview = __pyx_v_second_moms.memview; __PYX_INC_MEMVIEW(&__pyx_t_9, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_t; Py_ssize_t __pyx_tmp_shape = __pyx_v_second_moms.shape[0]; Py_ssize_t __pyx_tmp_stride = __pyx_v_second_moms.strides[0]; if (0 && (__pyx_tmp_idx < 0)) __pyx_tmp_idx += __pyx_tmp_shape; if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif __PYX_ERR(0, 164, __pyx_L15_error) } __pyx_t_9.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_9.shape[0] = __pyx_v_second_moms.shape[1]; __pyx_t_9.strides[0] = __pyx_v_second_moms.strides[1]; __pyx_t_9.suboffsets[0] = -1; 
__pyx_t_9.shape[1] = __pyx_v_second_moms.shape[2]; __pyx_t_9.strides[1] = __pyx_v_second_moms.strides[2]; __pyx_t_9.suboffsets[1] = -1; __pyx_t_20 = __pyx_v_t; __pyx_t_21 = (__pyx_v_t + 1); /* "bmtools/exact/moments.pyx":162 * num_threads=num_threads): * norm_consts[t] = 0. * accum_moments_for_state_range( # <<<<<<<<<<<<<< * weights, biases, states[t], &norm_consts[t], first_moms[t], * second_moms[t], intervals[t], intervals[t+1]) */ __pyx_f_7bmtools_5exact_7moments_accum_moments_for_state_range(__pyx_v_weights, __pyx_v_biases, __pyx_t_18, (&(*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_19 * __pyx_v_norm_consts.strides[0]) )))), __pyx_t_8, __pyx_t_9, (*((long *) ( /* dim=0 */ (__pyx_v_intervals.data + __pyx_t_20 * __pyx_v_intervals.strides[0]) ))), (*((long *) ( /* dim=0 */ (__pyx_v_intervals.data + __pyx_t_21 * __pyx_v_intervals.strides[0]) )))); __PYX_XDEC_MEMVIEW(&__pyx_t_18, 0); __pyx_t_18.memview = NULL; __pyx_t_18.data = NULL; __PYX_XDEC_MEMVIEW(&__pyx_t_8, 0); __pyx_t_8.memview = NULL; __pyx_t_8.data = NULL; __PYX_XDEC_MEMVIEW(&__pyx_t_9, 0); __pyx_t_9.memview = NULL; __pyx_t_9.data = NULL; goto __pyx_L18; __pyx_L15_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L17; __pyx_L17:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates0) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_t; } __pyx_L18:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP 
Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ __PYX_XDEC_MEMVIEW(&__pyx_t_18, 0); __PYX_XDEC_MEMVIEW(&__pyx_t_8, 0); __PYX_XDEC_MEMVIEW(&__pyx_t_9, 0); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_t = __pyx_parallel_temp0; switch (__pyx_parallel_why) { case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L11_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "bmtools/exact/moments.pyx":159 * # accumulating moments for its assigned states into thread-specific * # arrays * for t in prange(num_threads, nogil=True, schedule='static', chunksize=1, # <<<<<<<<<<<<<< * num_threads=num_threads): * norm_consts[t] = 0. 
*/ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L12; } __pyx_L11_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L12:; } } /* "bmtools/exact/moments.pyx":167 * # accumulate normalisation constant terms calculated by each individual * # thread to get overall value * for t in range(1, num_threads): # <<<<<<<<<<<<<< * norm_consts[0] += norm_consts[t] * # if multiple threads used accumulate moment values calculated by each */ __pyx_t_15 = __pyx_v_num_threads; for (__pyx_t_14 = 1; __pyx_t_14 < __pyx_t_15; __pyx_t_14+=1) { __pyx_v_t = __pyx_t_14; /* "bmtools/exact/moments.pyx":168 * # thread to get overall value * for t in range(1, num_threads): * norm_consts[0] += norm_consts[t] # <<<<<<<<<<<<<< * # if multiple threads used accumulate moment values calculated by each * # thread before normalising (and for second moments filling in */ __pyx_t_22 = __pyx_v_t; __pyx_t_23 = 0; *((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_23 * __pyx_v_norm_consts.strides[0]) )) += (*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_22 * __pyx_v_norm_consts.strides[0]) ))); } /* "bmtools/exact/moments.pyx":172 * # thread before normalising (and for second moments filling in * # diagonal and upper triangle values using symmetricity) * if num_threads > 1: # <<<<<<<<<<<<<< * combine_and_normalise_first_moments(first_moms, norm_consts[0]) * combine_normalise_and_reflect_second_moments(second_moms, */ __pyx_t_1 = ((__pyx_v_num_threads > 1) != 0); if (__pyx_t_1) { /* "bmtools/exact/moments.pyx":173 * # diagonal and upper triangle values using symmetricity) * if num_threads > 1: * combine_and_normalise_first_moments(first_moms, norm_consts[0]) # <<<<<<<<<<<<<< * combine_normalise_and_reflect_second_moments(second_moms, * norm_consts[0]) */ __pyx_t_24 = 0; __pyx_f_7bmtools_5exact_7moments_combine_and_normalise_first_moments(__pyx_v_first_moms, (*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data 
+ __pyx_t_24 * __pyx_v_norm_consts.strides[0]) )))); /* "bmtools/exact/moments.pyx":175 * combine_and_normalise_first_moments(first_moms, norm_consts[0]) * combine_normalise_and_reflect_second_moments(second_moms, * norm_consts[0]) # <<<<<<<<<<<<<< * else: * normalise_first_moment(first_moms[0], norm_consts[0]) */ __pyx_t_25 = 0; /* "bmtools/exact/moments.pyx":174 * if num_threads > 1: * combine_and_normalise_first_moments(first_moms, norm_consts[0]) * combine_normalise_and_reflect_second_moments(second_moms, # <<<<<<<<<<<<<< * norm_consts[0]) * else: */ __pyx_f_7bmtools_5exact_7moments_combine_normalise_and_reflect_second_moments(__pyx_v_second_moms, (*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_25 * __pyx_v_norm_consts.strides[0]) )))); /* "bmtools/exact/moments.pyx":172 * # thread before normalising (and for second moments filling in * # diagonal and upper triangle values using symmetricity) * if num_threads > 1: # <<<<<<<<<<<<<< * combine_and_normalise_first_moments(first_moms, norm_consts[0]) * combine_normalise_and_reflect_second_moments(second_moms, */ goto __pyx_L21; } /* "bmtools/exact/moments.pyx":177 * norm_consts[0]) * else: * normalise_first_moment(first_moms[0], norm_consts[0]) # <<<<<<<<<<<<<< * normalise_and_reflect_second_moment(second_moms[0], norm_consts[0]) * # only return values if not all arrays update in place */ /*else*/ { __pyx_t_8.data = __pyx_v_first_moms.data; __pyx_t_8.memview = __pyx_v_first_moms.memview; __PYX_INC_MEMVIEW(&__pyx_t_8, 0); { Py_ssize_t __pyx_tmp_idx = 0; Py_ssize_t __pyx_tmp_shape = __pyx_v_first_moms.shape[0]; Py_ssize_t __pyx_tmp_stride = __pyx_v_first_moms.strides[0]; if (0 && (__pyx_tmp_idx < 0)) __pyx_tmp_idx += __pyx_tmp_shape; if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) { PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)"); __PYX_ERR(0, 177, __pyx_L1_error) } __pyx_t_8.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_8.shape[0] = 
__pyx_v_first_moms.shape[1]; __pyx_t_8.strides[0] = __pyx_v_first_moms.strides[1]; __pyx_t_8.suboffsets[0] = -1; __pyx_t_26 = 0; __pyx_f_7bmtools_5exact_7moments_normalise_first_moment(__pyx_t_8, (*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_26 * __pyx_v_norm_consts.strides[0]) )))); __PYX_XDEC_MEMVIEW(&__pyx_t_8, 1); __pyx_t_8.memview = NULL; __pyx_t_8.data = NULL; /* "bmtools/exact/moments.pyx":178 * else: * normalise_first_moment(first_moms[0], norm_consts[0]) * normalise_and_reflect_second_moment(second_moms[0], norm_consts[0]) # <<<<<<<<<<<<<< * # only return values if not all arrays update in place * if not all_in_place: */ __pyx_t_9.data = __pyx_v_second_moms.data; __pyx_t_9.memview = __pyx_v_second_moms.memview; __PYX_INC_MEMVIEW(&__pyx_t_9, 0); { Py_ssize_t __pyx_tmp_idx = 0; Py_ssize_t __pyx_tmp_shape = __pyx_v_second_moms.shape[0]; Py_ssize_t __pyx_tmp_stride = __pyx_v_second_moms.strides[0]; if (0 && (__pyx_tmp_idx < 0)) __pyx_tmp_idx += __pyx_tmp_shape; if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) { PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)"); __PYX_ERR(0, 178, __pyx_L1_error) } __pyx_t_9.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_9.shape[0] = __pyx_v_second_moms.shape[1]; __pyx_t_9.strides[0] = __pyx_v_second_moms.strides[1]; __pyx_t_9.suboffsets[0] = -1; __pyx_t_9.shape[1] = __pyx_v_second_moms.shape[2]; __pyx_t_9.strides[1] = __pyx_v_second_moms.strides[2]; __pyx_t_9.suboffsets[1] = -1; __pyx_t_27 = 0; __pyx_f_7bmtools_5exact_7moments_normalise_and_reflect_second_moment(__pyx_t_9, (*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_27 * __pyx_v_norm_consts.strides[0]) )))); __PYX_XDEC_MEMVIEW(&__pyx_t_9, 1); __pyx_t_9.memview = NULL; __pyx_t_9.data = NULL; } __pyx_L21:; /* "bmtools/exact/moments.pyx":180 * normalise_and_reflect_second_moment(second_moms[0], norm_consts[0]) * # only return values if not all arrays update in place * if not all_in_place: # <<<<<<<<<<<<<< 
* return norm_consts[0], first_moms[0], second_moms[0] * */ __pyx_t_1 = ((!(__pyx_v_all_in_place != 0)) != 0); if (__pyx_t_1) { /* "bmtools/exact/moments.pyx":181 * # only return values if not all arrays update in place * if not all_in_place: * return norm_consts[0], first_moms[0], second_moms[0] # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_28 = 0; __pyx_t_10 = PyFloat_FromDouble((*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_28 * __pyx_v_norm_consts.strides[0]) )))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __pyx_t_8.data = __pyx_v_first_moms.data; __pyx_t_8.memview = __pyx_v_first_moms.memview; __PYX_INC_MEMVIEW(&__pyx_t_8, 0); { Py_ssize_t __pyx_tmp_idx = 0; Py_ssize_t __pyx_tmp_shape = __pyx_v_first_moms.shape[0]; Py_ssize_t __pyx_tmp_stride = __pyx_v_first_moms.strides[0]; if (0 && (__pyx_tmp_idx < 0)) __pyx_tmp_idx += __pyx_tmp_shape; if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) { PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)"); __PYX_ERR(0, 181, __pyx_L1_error) } __pyx_t_8.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_8.shape[0] = __pyx_v_first_moms.shape[1]; __pyx_t_8.strides[0] = __pyx_v_first_moms.strides[1]; __pyx_t_8.suboffsets[0] = -1; __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_t_8, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __PYX_XDEC_MEMVIEW(&__pyx_t_8, 1); __pyx_t_8.memview = NULL; __pyx_t_8.data = NULL; __pyx_t_9.data = __pyx_v_second_moms.data; __pyx_t_9.memview = __pyx_v_second_moms.memview; __PYX_INC_MEMVIEW(&__pyx_t_9, 0); { Py_ssize_t __pyx_tmp_idx = 0; Py_ssize_t __pyx_tmp_shape = __pyx_v_second_moms.shape[0]; Py_ssize_t __pyx_tmp_stride = __pyx_v_second_moms.strides[0]; if (0 && (__pyx_tmp_idx < 0)) __pyx_tmp_idx += __pyx_tmp_shape; if (0 && (__pyx_tmp_idx < 0 || 
__pyx_tmp_idx >= __pyx_tmp_shape)) { PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)"); __PYX_ERR(0, 181, __pyx_L1_error) } __pyx_t_9.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_9.shape[0] = __pyx_v_second_moms.shape[1]; __pyx_t_9.strides[0] = __pyx_v_second_moms.strides[1]; __pyx_t_9.suboffsets[0] = -1; __pyx_t_9.shape[1] = __pyx_v_second_moms.shape[2]; __pyx_t_9.strides[1] = __pyx_v_second_moms.strides[2]; __pyx_t_9.suboffsets[1] = -1; __pyx_t_5 = __pyx_memoryview_fromslice(__pyx_t_9, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __PYX_XDEC_MEMVIEW(&__pyx_t_9, 1); __pyx_t_9.memview = NULL; __pyx_t_9.data = NULL; __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_10); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_10); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __pyx_t_10 = 0; __pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "bmtools/exact/moments.pyx":180 * normalise_and_reflect_second_moment(second_moms[0], norm_consts[0]) * # only return values if not all arrays update in place * if not all_in_place: # <<<<<<<<<<<<<< * return norm_consts[0], first_moms[0], second_moms[0] * */ } /* "bmtools/exact/moments.pyx":74 * * * def calculate_moments_parallel(double[:, :] weights, double[:] biases, # <<<<<<<<<<<<<< * bint force=False, int num_threads=2, * double[:] norm_consts=None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_8, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_9, 1); 
__Pyx_XDECREF(__pyx_t_10); __PYX_XDEC_MEMVIEW(&__pyx_t_11, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_12, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_18, 1); __Pyx_AddTraceback("bmtools.exact.moments.calculate_moments_parallel", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_states, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_intervals, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_weights, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_biases, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_norm_consts, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_first_moms, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_second_moms, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "bmtools/exact/moments.pyx":184 * * * def calculate_probs_parallel( # <<<<<<<<<<<<<< * double[:, :] weights, double[:] biases, bint force=False, * int num_threads=2,): */ /* Python wrapper */ static PyObject *__pyx_pw_7bmtools_5exact_7moments_5calculate_probs_parallel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_7bmtools_5exact_7moments_4calculate_probs_parallel[] = "calculate_probs_parallel(__Pyx_memviewslice weights, __Pyx_memviewslice biases, bool force=False, int num_threads=2)\nCalculate BM distribution probabilities using parallel implementation.\n\n Calculates the probabilities of all signed binary states in according to a\n Boltzmann machine invariant distribution specified by the provided weight\n and bias parameters. Calculation is done by exahustive iteration over the\n 2**num_units state space so should only be attempted for moderate\n dimensionalities. Calculations are done in parallel using a specified number\n of thread each iterating across an equal partition of the state space.\n\n Parameters\n ----------\n weights : double[:, :]\n Matrix of weight parameters. 
Should be symmetric and zero-diagonal.\n biases : double[:]\n Vector of bias parameters.\n force : bool (bint)\n Flag to override forced exit when number of units is more than 20\n due to large size of state space.\n num_threads : int\n Number of parallel threads to use.\n\n Returns\n -------\n probs : double[:]\n Array of normalised probabilities for all states.\n norm_const : double\n Sum of unnormalised probability terms across all states.\n "; static PyMethodDef __pyx_mdef_7bmtools_5exact_7moments_5calculate_probs_parallel = {"calculate_probs_parallel", (PyCFunction)__pyx_pw_7bmtools_5exact_7moments_5calculate_probs_parallel, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7bmtools_5exact_7moments_4calculate_probs_parallel}; static PyObject *__pyx_pw_7bmtools_5exact_7moments_5calculate_probs_parallel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_weights = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_biases = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_force; int __pyx_v_num_threads; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("calculate_probs_parallel (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_weights,&__pyx_n_s_biases,&__pyx_n_s_force,&__pyx_n_s_num_threads,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_weights)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_biases)) != 0)) 
kw_args--; else { __Pyx_RaiseArgtupleInvalid("calculate_probs_parallel", 0, 2, 4, 1); __PYX_ERR(0, 184, __pyx_L3_error) } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_force); if (value) { values[2] = value; kw_args--; } } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_num_threads); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "calculate_probs_parallel") < 0)) __PYX_ERR(0, 184, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_weights = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0]); if (unlikely(!__pyx_v_weights.memview)) __PYX_ERR(0, 185, __pyx_L3_error) __pyx_v_biases = __Pyx_PyObject_to_MemoryviewSlice_ds_double(values[1]); if (unlikely(!__pyx_v_biases.memview)) __PYX_ERR(0, 185, __pyx_L3_error) if (values[2]) { __pyx_v_force = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_force == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 185, __pyx_L3_error) } else { /* "bmtools/exact/moments.pyx":185 * * def calculate_probs_parallel( * double[:, :] weights, double[:] biases, bint force=False, # <<<<<<<<<<<<<< * int num_threads=2,): * """Calculate BM distribution probabilities using parallel implementation. 
*/ __pyx_v_force = ((int)0); } if (values[3]) { __pyx_v_num_threads = __Pyx_PyInt_As_int(values[3]); if (unlikely((__pyx_v_num_threads == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 186, __pyx_L3_error) } else { __pyx_v_num_threads = ((int)2); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("calculate_probs_parallel", 0, 2, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 184, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("bmtools.exact.moments.calculate_probs_parallel", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_7bmtools_5exact_7moments_4calculate_probs_parallel(__pyx_self, __pyx_v_weights, __pyx_v_biases, __pyx_v_force, __pyx_v_num_threads); /* "bmtools/exact/moments.pyx":184 * * * def calculate_probs_parallel( # <<<<<<<<<<<<<< * double[:, :] weights, double[:] biases, bint force=False, * int num_threads=2,): */ /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_7bmtools_5exact_7moments_4calculate_probs_parallel(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_weights, __Pyx_memviewslice __pyx_v_biases, int __pyx_v_force, int __pyx_v_num_threads) { int __pyx_v_t; int __pyx_v_num_units; long __pyx_v_num_states; __Pyx_memviewslice __pyx_v_states = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_norm_consts = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_probs = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_intervals = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_7 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_8 = { 0, 0, { 0 }, { 0 }, { 0 } }; int 
__pyx_t_9; int __pyx_t_10; int __pyx_t_11; int __pyx_t_12; Py_ssize_t __pyx_t_13; __Pyx_memviewslice __pyx_t_14 = { 0, 0, { 0 }, { 0 }, { 0 } }; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; Py_ssize_t __pyx_t_17; int __pyx_t_18; Py_ssize_t __pyx_t_19; Py_ssize_t __pyx_t_20; Py_ssize_t __pyx_t_21; Py_ssize_t __pyx_t_22; Py_ssize_t __pyx_t_23; Py_ssize_t __pyx_t_24; Py_ssize_t __pyx_t_25; Py_ssize_t __pyx_t_26; __Pyx_RefNannySetupContext("calculate_probs_parallel", 0); /* "bmtools/exact/moments.pyx":216 * """ * cdef int t * cdef int num_units = weights.shape[0] # <<<<<<<<<<<<<< * cdef long num_states = 2 ** num_units * if num_threads <= 0: */ __pyx_v_num_units = (__pyx_v_weights.shape[0]); /* "bmtools/exact/moments.pyx":217 * cdef int t * cdef int num_units = weights.shape[0] * cdef long num_states = 2 ** num_units # <<<<<<<<<<<<<< * if num_threads <= 0: * raise ValueError('Number of threads must be > 0') */ __pyx_v_num_states = __Pyx_pow_long(2, ((long)__pyx_v_num_units)); /* "bmtools/exact/moments.pyx":218 * cdef int num_units = weights.shape[0] * cdef long num_states = 2 ** num_units * if num_threads <= 0: # <<<<<<<<<<<<<< * raise ValueError('Number of threads must be > 0') * check_state_space_size(num_units, force) */ __pyx_t_1 = ((__pyx_v_num_threads <= 0) != 0); if (__pyx_t_1) { /* "bmtools/exact/moments.pyx":219 * cdef long num_states = 2 ** num_units * if num_threads <= 0: * raise ValueError('Number of threads must be > 0') # <<<<<<<<<<<<<< * check_state_space_size(num_units, force) * cdef state_t[:, :] states = array( */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 219, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 219, __pyx_L1_error) /* "bmtools/exact/moments.pyx":218 * cdef int num_units = weights.shape[0] * cdef long num_states = 2 ** num_units * if num_threads <= 0: # <<<<<<<<<<<<<< * raise 
ValueError('Number of threads must be > 0') * check_state_space_size(num_units, force) */ } /* "bmtools/exact/moments.pyx":220 * if num_threads <= 0: * raise ValueError('Number of threads must be > 0') * check_state_space_size(num_units, force) # <<<<<<<<<<<<<< * cdef state_t[:, :] states = array( * shape=(num_threads, num_units), itemsize=sizeof(state_t), format=state_t_code) */ __pyx_f_7bmtools_5exact_7helpers_check_state_space_size(__pyx_v_num_units, __pyx_v_force, 0); /* "bmtools/exact/moments.pyx":222 * check_state_space_size(num_units, force) * cdef state_t[:, :] states = array( * shape=(num_threads, num_units), itemsize=sizeof(state_t), format=state_t_code) # <<<<<<<<<<<<<< * cdef double[:] norm_consts = array(shape=(num_threads,), * itemsize=sizeof(double), format='d') */ __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_num_threads); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_num_units); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_shape, __pyx_t_5) < 0) __PYX_ERR(0, 222, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyInt_FromSize_t((sizeof(__pyx_t_7bmtools_5exact_7helpers_state_t))); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_itemsize, __pyx_t_5) < 0) __PYX_ERR(0, 222, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_7bmtools_5exact_7helpers_state_t_code); if 
(unlikely(!__pyx_t_5)) __PYX_ERR(0, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_format, __pyx_t_5) < 0) __PYX_ERR(0, 222, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "bmtools/exact/moments.pyx":221 * raise ValueError('Number of threads must be > 0') * check_state_space_size(num_units, force) * cdef state_t[:, :] states = array( # <<<<<<<<<<<<<< * shape=(num_threads, num_units), itemsize=sizeof(state_t), format=state_t_code) * cdef double[:] norm_consts = array(shape=(num_threads,), */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 221, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_dsds_nn___pyx_t_7bmtools_5exact_7helpers_state_t(__pyx_t_5); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 221, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_states = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "bmtools/exact/moments.pyx":223 * cdef state_t[:, :] states = array( * shape=(num_threads, num_units), itemsize=sizeof(state_t), format=state_t_code) * cdef double[:] norm_consts = array(shape=(num_threads,), # <<<<<<<<<<<<<< * itemsize=sizeof(double), format='d') * cdef double[:] probs = array( */ __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_num_threads); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_shape, __pyx_t_4) < 0) __PYX_ERR(0, 223, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "bmtools/exact/moments.pyx":224 * 
shape=(num_threads, num_units), itemsize=sizeof(state_t), format=state_t_code) * cdef double[:] norm_consts = array(shape=(num_threads,), * itemsize=sizeof(double), format='d') # <<<<<<<<<<<<<< * cdef double[:] probs = array( * shape=(num_states,), itemsize=sizeof(double), format='d' */ __pyx_t_4 = __Pyx_PyInt_FromSize_t((sizeof(double))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 224, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_itemsize, __pyx_t_4) < 0) __PYX_ERR(0, 223, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_format, __pyx_n_s_d) < 0) __PYX_ERR(0, 223, __pyx_L1_error) /* "bmtools/exact/moments.pyx":223 * cdef state_t[:, :] states = array( * shape=(num_threads, num_units), itemsize=sizeof(state_t), format=state_t_code) * cdef double[:] norm_consts = array(shape=(num_threads,), # <<<<<<<<<<<<<< * itemsize=sizeof(double), format='d') * cdef double[:] probs = array( */ __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_empty_tuple, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_4); if (unlikely(!__pyx_t_7.memview)) __PYX_ERR(0, 223, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_norm_consts = __pyx_t_7; __pyx_t_7.memview = NULL; __pyx_t_7.data = NULL; /* "bmtools/exact/moments.pyx":226 * itemsize=sizeof(double), format='d') * cdef double[:] probs = array( * shape=(num_states,), itemsize=sizeof(double), format='d' # <<<<<<<<<<<<<< * ) * # partition state space in to equal sized sections to allocate to */ __pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyInt_From_long(__pyx_v_num_states); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = PyTuple_New(1); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(0, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5); __pyx_t_5 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_shape, __pyx_t_2) < 0) __PYX_ERR(0, 226, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_FromSize_t((sizeof(double))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_itemsize, __pyx_t_2) < 0) __PYX_ERR(0, 226, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_format, __pyx_n_s_d) < 0) __PYX_ERR(0, 226, __pyx_L1_error) /* "bmtools/exact/moments.pyx":225 * cdef double[:] norm_consts = array(shape=(num_threads,), * itemsize=sizeof(double), format='d') * cdef double[:] probs = array( # <<<<<<<<<<<<<< * shape=(num_states,), itemsize=sizeof(double), format='d' * ) */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_empty_tuple, __pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 225, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_2); if (unlikely(!__pyx_t_7.memview)) __PYX_ERR(0, 225, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_probs = __pyx_t_7; __pyx_t_7.memview = NULL; __pyx_t_7.data = NULL; /* "bmtools/exact/moments.pyx":230 * # partition state space in to equal sized sections to allocate to * # different parallel threads * cdef long[:] intervals = partition_state_space(num_states, num_threads) # <<<<<<<<<<<<<< * # parallel loop over partitions of state space, with each thread * # calculating probabilities for its assigned states into thread-specific */ __pyx_t_8 = __pyx_f_7bmtools_5exact_7helpers_partition_state_space(__pyx_v_num_states, __pyx_v_num_threads); if (unlikely(!__pyx_t_8.memview)) __PYX_ERR(0, 230, __pyx_L1_error) __pyx_v_intervals = __pyx_t_8; __pyx_t_8.memview = 
NULL; __pyx_t_8.data = NULL; /* "bmtools/exact/moments.pyx":234 * # calculating probabilities for its assigned states into thread-specific * # slice of probs array * for t in prange(num_threads, nogil=True, schedule='static', chunksize=1, # <<<<<<<<<<<<<< * num_threads=num_threads): * norm_consts[t] = 0. */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { __pyx_t_9 = __pyx_v_num_threads; if (1 == 0) abort(); { int __pyx_parallel_temp0 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; __pyx_t_12 = 1; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_11 = (__pyx_t_9 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_11 > 0) { #ifdef _OPENMP #pragma omp parallel num_threads(__pyx_v_num_threads) private(__pyx_t_13, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20) firstprivate(__pyx_t_14, __pyx_t_7) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_t) lastprivate(__pyx_v_t) schedule(static, __pyx_t_12) #endif /* _OPENMP */ for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_11; __pyx_t_10++){ if (__pyx_parallel_why < 2) { __pyx_v_t = (int)(0 + 1 * __pyx_t_10); /* "bmtools/exact/moments.pyx":236 * for t in prange(num_threads, nogil=True, schedule='static', chunksize=1, * num_threads=num_threads): * norm_consts[t] = 0. 
# <<<<<<<<<<<<<< * calc_unnormed_probs_for_state_range( * weights, biases, states[t], &norm_consts[t], */ __pyx_t_13 = __pyx_v_t; *((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_13 * __pyx_v_norm_consts.strides[0]) )) = 0.; /* "bmtools/exact/moments.pyx":238 * norm_consts[t] = 0. * calc_unnormed_probs_for_state_range( * weights, biases, states[t], &norm_consts[t], # <<<<<<<<<<<<<< * probs[intervals[t]:intervals[t+1]], * intervals[t], intervals[t+1]) */ __pyx_t_14.data = __pyx_v_states.data; __pyx_t_14.memview = __pyx_v_states.memview; __PYX_INC_MEMVIEW(&__pyx_t_14, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_t; Py_ssize_t __pyx_tmp_shape = __pyx_v_states.shape[0]; Py_ssize_t __pyx_tmp_stride = __pyx_v_states.strides[0]; if (0 && (__pyx_tmp_idx < 0)) __pyx_tmp_idx += __pyx_tmp_shape; if (0 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif __PYX_ERR(0, 238, __pyx_L9_error) } __pyx_t_14.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_14.shape[0] = __pyx_v_states.shape[1]; __pyx_t_14.strides[0] = __pyx_v_states.strides[1]; __pyx_t_14.suboffsets[0] = -1; __pyx_t_15 = __pyx_v_t; /* "bmtools/exact/moments.pyx":239 * calc_unnormed_probs_for_state_range( * weights, biases, states[t], &norm_consts[t], * probs[intervals[t]:intervals[t+1]], # <<<<<<<<<<<<<< * intervals[t], intervals[t+1]) * # accumulate normalisation constant terms calculated by each individual */ __pyx_t_16 = __pyx_v_t; __pyx_t_17 = (__pyx_v_t + 1); __pyx_t_7.data = __pyx_v_probs.data; __pyx_t_7.memview = __pyx_v_probs.memview; __PYX_INC_MEMVIEW(&__pyx_t_7, 0); __pyx_t_18 = -1; if (unlikely(__pyx_memoryview_slice_memviewslice( &__pyx_t_7, __pyx_v_probs.shape[0], __pyx_v_probs.strides[0], __pyx_v_probs.suboffsets[0], 0, 0, &__pyx_t_18, (*((long *) ( /* dim=0 */ 
(__pyx_v_intervals.data + __pyx_t_16 * __pyx_v_intervals.strides[0]) ))), (*((long *) ( /* dim=0 */ (__pyx_v_intervals.data + __pyx_t_17 * __pyx_v_intervals.strides[0]) ))), 0, 1, 1, 0, 1) < 0)) { __PYX_ERR(0, 239, __pyx_L9_error) } __pyx_t_19 = __pyx_v_t; /* "bmtools/exact/moments.pyx":240 * weights, biases, states[t], &norm_consts[t], * probs[intervals[t]:intervals[t+1]], * intervals[t], intervals[t+1]) # <<<<<<<<<<<<<< * # accumulate normalisation constant terms calculated by each individual * # thread to get overall value */ __pyx_t_20 = (__pyx_v_t + 1); /* "bmtools/exact/moments.pyx":237 * num_threads=num_threads): * norm_consts[t] = 0. * calc_unnormed_probs_for_state_range( # <<<<<<<<<<<<<< * weights, biases, states[t], &norm_consts[t], * probs[intervals[t]:intervals[t+1]], */ __pyx_f_7bmtools_5exact_7moments_calc_unnormed_probs_for_state_range(__pyx_v_weights, __pyx_v_biases, __pyx_t_14, (&(*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_15 * __pyx_v_norm_consts.strides[0]) )))), __pyx_t_7, (*((long *) ( /* dim=0 */ (__pyx_v_intervals.data + __pyx_t_19 * __pyx_v_intervals.strides[0]) ))), (*((long *) ( /* dim=0 */ (__pyx_v_intervals.data + __pyx_t_20 * __pyx_v_intervals.strides[0]) )))); __PYX_XDEC_MEMVIEW(&__pyx_t_14, 0); __pyx_t_14.memview = NULL; __pyx_t_14.data = NULL; __PYX_XDEC_MEMVIEW(&__pyx_t_7, 0); __pyx_t_7.memview = NULL; __pyx_t_7.data = NULL; goto __pyx_L12; __pyx_L9_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto 
__pyx_L11; __pyx_L11:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates1) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_t; } __pyx_L12:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ __PYX_XDEC_MEMVIEW(&__pyx_t_14, 0); __PYX_XDEC_MEMVIEW(&__pyx_t_7, 0); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_t = __pyx_parallel_temp0; switch (__pyx_parallel_why) { case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L5_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "bmtools/exact/moments.pyx":234 * # calculating probabilities for its assigned states into thread-specific * # slice of probs array * for t in prange(num_threads, nogil=True, schedule='static', chunksize=1, # <<<<<<<<<<<<<< * num_threads=num_threads): * norm_consts[t] = 0. 
*/ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L6; } __pyx_L5_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L6:; } } /* "bmtools/exact/moments.pyx":243 * # accumulate normalisation constant terms calculated by each individual * # thread to get overall value * for t in range(1, num_threads): # <<<<<<<<<<<<<< * norm_consts[0] += norm_consts[t] * # normalise probabilities by dividing through by normalisation constant */ __pyx_t_11 = __pyx_v_num_threads; for (__pyx_t_10 = 1; __pyx_t_10 < __pyx_t_11; __pyx_t_10+=1) { __pyx_v_t = __pyx_t_10; /* "bmtools/exact/moments.pyx":244 * # thread to get overall value * for t in range(1, num_threads): * norm_consts[0] += norm_consts[t] # <<<<<<<<<<<<<< * # normalise probabilities by dividing through by normalisation constant * # in parallel over multiple threads */ __pyx_t_21 = __pyx_v_t; __pyx_t_22 = 0; *((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_22 * __pyx_v_norm_consts.strides[0]) )) += (*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_21 * __pyx_v_norm_consts.strides[0]) ))); } /* "bmtools/exact/moments.pyx":247 * # normalise probabilities by dividing through by normalisation constant * # in parallel over multiple threads * for t in prange(num_threads, nogil=True, schedule='static', chunksize=1, # <<<<<<<<<<<<<< * num_threads=num_threads): * normalise_probabilities(probs[intervals[t]:intervals[t+1]], */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { __pyx_t_11 = __pyx_v_num_threads; if (1 == 0) abort(); { int __pyx_parallel_temp0 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; __pyx_t_9 = 1; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && 
(__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_12 = (__pyx_t_11 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_12 > 0) { #ifdef _OPENMP #pragma omp parallel num_threads(__pyx_v_num_threads) private(__pyx_t_18, __pyx_t_23, __pyx_t_24, __pyx_t_25) firstprivate(__pyx_t_7) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_t) lastprivate(__pyx_v_t) schedule(static, __pyx_t_9) #endif /* _OPENMP */ for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_12; __pyx_t_10++){ if (__pyx_parallel_why < 2) { __pyx_v_t = (int)(0 + 1 * __pyx_t_10); /* "bmtools/exact/moments.pyx":249 * for t in prange(num_threads, nogil=True, schedule='static', chunksize=1, * num_threads=num_threads): * normalise_probabilities(probs[intervals[t]:intervals[t+1]], # <<<<<<<<<<<<<< * norm_consts[0]) * return probs, norm_consts[0] */ __pyx_t_23 = __pyx_v_t; __pyx_t_24 = (__pyx_v_t + 1); __pyx_t_7.data = __pyx_v_probs.data; __pyx_t_7.memview = __pyx_v_probs.memview; __PYX_INC_MEMVIEW(&__pyx_t_7, 0); __pyx_t_18 = -1; if (unlikely(__pyx_memoryview_slice_memviewslice( &__pyx_t_7, __pyx_v_probs.shape[0], __pyx_v_probs.strides[0], __pyx_v_probs.suboffsets[0], 0, 0, &__pyx_t_18, (*((long *) ( /* dim=0 */ (__pyx_v_intervals.data + __pyx_t_23 * __pyx_v_intervals.strides[0]) ))), (*((long *) ( /* dim=0 */ (__pyx_v_intervals.data + __pyx_t_24 * __pyx_v_intervals.strides[0]) ))), 0, 1, 1, 0, 1) < 0)) { __PYX_ERR(0, 249, __pyx_L20_error) } __pyx_t_25 = 0; /* "bmtools/exact/moments.pyx":250 * num_threads=num_threads): * normalise_probabilities(probs[intervals[t]:intervals[t+1]], * norm_consts[0]) # <<<<<<<<<<<<<< * 
return probs, norm_consts[0] * */ __pyx_f_7bmtools_5exact_7moments_normalise_probabilities(__pyx_t_7, (*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_25 * __pyx_v_norm_consts.strides[0]) )))); __PYX_XDEC_MEMVIEW(&__pyx_t_7, 0); __pyx_t_7.memview = NULL; __pyx_t_7.data = NULL; goto __pyx_L23; __pyx_L20_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L22; __pyx_L22:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates2) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_t; } __pyx_L23:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ __PYX_XDEC_MEMVIEW(&__pyx_t_7, 0); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_t = __pyx_parallel_temp0; switch (__pyx_parallel_why) { case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L16_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "bmtools/exact/moments.pyx":247 * # normalise probabilities by dividing through by normalisation constant * # in parallel over multiple threads * for t in prange(num_threads, nogil=True, schedule='static', chunksize=1, # <<<<<<<<<<<<<< * num_threads=num_threads): * normalise_probabilities(probs[intervals[t]:intervals[t+1]], */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L17; } __pyx_L16_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L17:; } } /* "bmtools/exact/moments.pyx":251 * normalise_probabilities(probs[intervals[t]:intervals[t+1]], * norm_consts[0]) * return probs, norm_consts[0] # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_probs, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_26 = 0; __pyx_t_4 = PyFloat_FromDouble((*((double *) ( /* dim=0 */ (__pyx_v_norm_consts.data + __pyx_t_26 * __pyx_v_norm_consts.strides[0]) )))); if (unlikely(!__pyx_t_4)) 
__PYX_ERR(0, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "bmtools/exact/moments.pyx":184 * * * def calculate_probs_parallel( # <<<<<<<<<<<<<< * double[:, :] weights, double[:] biases, bint force=False, * int num_threads=2,): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_7, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_8, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_14, 1); __Pyx_AddTraceback("bmtools.exact.moments.calculate_probs_parallel", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_states, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_norm_consts, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_probs, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_intervals, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_weights, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_biases, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "bmtools/exact/moments.pyx":254 * * * cdef void calc_unnormed_probs_for_state_range( # <<<<<<<<<<<<<< * double[:, :] weights, double[:] biases, state_t[:] state, * double* norm_const, double[:] probs, */ static void __pyx_f_7bmtools_5exact_7moments_calc_unnormed_probs_for_state_range(__Pyx_memviewslice __pyx_v_weights, __Pyx_memviewslice __pyx_v_biases, __Pyx_memviewslice __pyx_v_state, double *__pyx_v_norm_const, __Pyx_memviewslice __pyx_v_probs, long __pyx_v_start_state_index, long __pyx_v_end_state_index) { long __pyx_v_i; long __pyx_t_1; long __pyx_t_2; Py_ssize_t __pyx_t_3; long __pyx_t_4; Py_ssize_t __pyx_t_5; /* "bmtools/exact/moments.pyx":263 * """ * cdef long i * 
index_to_state(start_state_index, state) # <<<<<<<<<<<<<< * for i in range(end_state_index - start_state_index): * probs[i] = exp(neg_energy(state, weights, biases)) */ __pyx_f_7bmtools_5exact_7helpers_index_to_state(__pyx_v_start_state_index, __pyx_v_state, 0); /* "bmtools/exact/moments.pyx":264 * cdef long i * index_to_state(start_state_index, state) * for i in range(end_state_index - start_state_index): # <<<<<<<<<<<<<< * probs[i] = exp(neg_energy(state, weights, biases)) * norm_const[0] += probs[i] */ __pyx_t_1 = (__pyx_v_end_state_index - __pyx_v_start_state_index); for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "bmtools/exact/moments.pyx":265 * index_to_state(start_state_index, state) * for i in range(end_state_index - start_state_index): * probs[i] = exp(neg_energy(state, weights, biases)) # <<<<<<<<<<<<<< * norm_const[0] += probs[i] * next_state(state, start_state_index + i + 1) */ __pyx_t_3 = __pyx_v_i; *((double *) ( /* dim=0 */ (__pyx_v_probs.data + __pyx_t_3 * __pyx_v_probs.strides[0]) )) = exp(__pyx_f_7bmtools_5exact_7helpers_neg_energy(__pyx_v_state, __pyx_v_weights, __pyx_v_biases)); /* "bmtools/exact/moments.pyx":266 * for i in range(end_state_index - start_state_index): * probs[i] = exp(neg_energy(state, weights, biases)) * norm_const[0] += probs[i] # <<<<<<<<<<<<<< * next_state(state, start_state_index + i + 1) * */ __pyx_t_4 = 0; __pyx_t_5 = __pyx_v_i; (__pyx_v_norm_const[__pyx_t_4]) = ((__pyx_v_norm_const[__pyx_t_4]) + (*((double *) ( /* dim=0 */ (__pyx_v_probs.data + __pyx_t_5 * __pyx_v_probs.strides[0]) )))); /* "bmtools/exact/moments.pyx":267 * probs[i] = exp(neg_energy(state, weights, biases)) * norm_const[0] += probs[i] * next_state(state, start_state_index + i + 1) # <<<<<<<<<<<<<< * * */ __pyx_f_7bmtools_5exact_7helpers_next_state(__pyx_v_state, ((__pyx_v_start_state_index + __pyx_v_i) + 1)); } /* "bmtools/exact/moments.pyx":254 * * * cdef void calc_unnormed_probs_for_state_range( # <<<<<<<<<<<<<< * 
double[:, :] weights, double[:] biases, state_t[:] state, * double* norm_const, double[:] probs, */ /* function exit code */ } /* "bmtools/exact/moments.pyx":270 * * * cdef void normalise_probabilities(double[:] probs, double norm_const) nogil: # <<<<<<<<<<<<<< * """Divides an array of probabilities by a normalisation constant.""" * cdef long i */ static void __pyx_f_7bmtools_5exact_7moments_normalise_probabilities(__Pyx_memviewslice __pyx_v_probs, double __pyx_v_norm_const) { long __pyx_v_i; Py_ssize_t __pyx_t_1; long __pyx_t_2; Py_ssize_t __pyx_t_3; /* "bmtools/exact/moments.pyx":273 * """Divides an array of probabilities by a normalisation constant.""" * cdef long i * for i in range(probs.shape[0]): # <<<<<<<<<<<<<< * probs[i] /= norm_const * */ __pyx_t_1 = (__pyx_v_probs.shape[0]); for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "bmtools/exact/moments.pyx":274 * cdef long i * for i in range(probs.shape[0]): * probs[i] /= norm_const # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_v_i; *((double *) ( /* dim=0 */ (__pyx_v_probs.data + __pyx_t_3 * __pyx_v_probs.strides[0]) )) /= __pyx_v_norm_const; } /* "bmtools/exact/moments.pyx":270 * * * cdef void normalise_probabilities(double[:] probs, double norm_const) nogil: # <<<<<<<<<<<<<< * """Divides an array of probabilities by a normalisation constant.""" * cdef long i */ /* function exit code */ } /* "bmtools/exact/moments.pyx":277 * * * cdef void accum_moments_for_state_range( # <<<<<<<<<<<<<< * double[:, :] weights, double[:] biases, state_t[:] state, * double* norm_const, double[:] first_mom, double[:, :] second_mom, */ static void __pyx_f_7bmtools_5exact_7moments_accum_moments_for_state_range(__Pyx_memviewslice __pyx_v_weights, __Pyx_memviewslice __pyx_v_biases, __Pyx_memviewslice __pyx_v_state, double *__pyx_v_norm_const, __Pyx_memviewslice __pyx_v_first_mom, __Pyx_memviewslice __pyx_v_second_mom, long __pyx_v_start_state_index, long __pyx_v_end_state_index) { double __pyx_v_prob; 
long __pyx_v_state_index; int __pyx_v_i; int __pyx_v_j; Py_ssize_t __pyx_t_1; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; int __pyx_t_5; Py_ssize_t __pyx_t_6; Py_ssize_t __pyx_t_7; long __pyx_t_8; long __pyx_t_9; long __pyx_t_10; Py_ssize_t __pyx_t_11; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; Py_ssize_t __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; /* "bmtools/exact/moments.pyx":285 * corresponding to a contiguous range of state integer indices. * """ * cdef double prob = 0. # <<<<<<<<<<<<<< * cdef long state_index * cdef int i, j */ __pyx_v_prob = 0.; /* "bmtools/exact/moments.pyx":288 * cdef long state_index * cdef int i, j * index_to_state(start_state_index, state) # <<<<<<<<<<<<<< * for i in range(weights.shape[0]): * first_mom[i] = 0. */ __pyx_f_7bmtools_5exact_7helpers_index_to_state(__pyx_v_start_state_index, __pyx_v_state, 0); /* "bmtools/exact/moments.pyx":289 * cdef int i, j * index_to_state(start_state_index, state) * for i in range(weights.shape[0]): # <<<<<<<<<<<<<< * first_mom[i] = 0. * for j in range(i): */ __pyx_t_1 = (__pyx_v_weights.shape[0]); for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "bmtools/exact/moments.pyx":290 * index_to_state(start_state_index, state) * for i in range(weights.shape[0]): * first_mom[i] = 0. # <<<<<<<<<<<<<< * for j in range(i): * second_mom[i, j] = 0. */ __pyx_t_3 = __pyx_v_i; *((double *) ( /* dim=0 */ (__pyx_v_first_mom.data + __pyx_t_3 * __pyx_v_first_mom.strides[0]) )) = 0.; /* "bmtools/exact/moments.pyx":291 * for i in range(weights.shape[0]): * first_mom[i] = 0. * for j in range(i): # <<<<<<<<<<<<<< * second_mom[i, j] = 0. * for state_index in range(start_state_index, end_state_index): */ __pyx_t_4 = __pyx_v_i; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_j = __pyx_t_5; /* "bmtools/exact/moments.pyx":292 * first_mom[i] = 0. * for j in range(i): * second_mom[i, j] = 0. 
# <<<<<<<<<<<<<< * for state_index in range(start_state_index, end_state_index): * prob = exp(neg_energy(state, weights, biases)) */ __pyx_t_6 = __pyx_v_i; __pyx_t_7 = __pyx_v_j; *((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_mom.data + __pyx_t_6 * __pyx_v_second_mom.strides[0]) ) + __pyx_t_7 * __pyx_v_second_mom.strides[1]) )) = 0.; } } /* "bmtools/exact/moments.pyx":293 * for j in range(i): * second_mom[i, j] = 0. * for state_index in range(start_state_index, end_state_index): # <<<<<<<<<<<<<< * prob = exp(neg_energy(state, weights, biases)) * norm_const[0] += prob */ __pyx_t_8 = __pyx_v_end_state_index; for (__pyx_t_9 = __pyx_v_start_state_index; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { __pyx_v_state_index = __pyx_t_9; /* "bmtools/exact/moments.pyx":294 * second_mom[i, j] = 0. * for state_index in range(start_state_index, end_state_index): * prob = exp(neg_energy(state, weights, biases)) # <<<<<<<<<<<<<< * norm_const[0] += prob * for i in range(state.shape[0]): */ __pyx_v_prob = exp(__pyx_f_7bmtools_5exact_7helpers_neg_energy(__pyx_v_state, __pyx_v_weights, __pyx_v_biases)); /* "bmtools/exact/moments.pyx":295 * for state_index in range(start_state_index, end_state_index): * prob = exp(neg_energy(state, weights, biases)) * norm_const[0] += prob # <<<<<<<<<<<<<< * for i in range(state.shape[0]): * first_mom[i] += state[i] * prob */ __pyx_t_10 = 0; (__pyx_v_norm_const[__pyx_t_10]) = ((__pyx_v_norm_const[__pyx_t_10]) + __pyx_v_prob); /* "bmtools/exact/moments.pyx":296 * prob = exp(neg_energy(state, weights, biases)) * norm_const[0] += prob * for i in range(state.shape[0]): # <<<<<<<<<<<<<< * first_mom[i] += state[i] * prob * for j in range(i): */ __pyx_t_1 = (__pyx_v_state.shape[0]); for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "bmtools/exact/moments.pyx":297 * norm_const[0] += prob * for i in range(state.shape[0]): * first_mom[i] += state[i] * prob # <<<<<<<<<<<<<< * for j in range(i): * second_mom[i, j] += state[i] 
* state[j] * prob */ __pyx_t_11 = __pyx_v_i; __pyx_t_12 = __pyx_v_i; *((double *) ( /* dim=0 */ (__pyx_v_first_mom.data + __pyx_t_12 * __pyx_v_first_mom.strides[0]) )) += ((*((__pyx_t_7bmtools_5exact_7helpers_state_t *) ( /* dim=0 */ (__pyx_v_state.data + __pyx_t_11 * __pyx_v_state.strides[0]) ))) * __pyx_v_prob); /* "bmtools/exact/moments.pyx":298 * for i in range(state.shape[0]): * first_mom[i] += state[i] * prob * for j in range(i): # <<<<<<<<<<<<<< * second_mom[i, j] += state[i] * state[j] * prob * next_state(state, state_index + 1) */ __pyx_t_4 = __pyx_v_i; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_j = __pyx_t_5; /* "bmtools/exact/moments.pyx":299 * first_mom[i] += state[i] * prob * for j in range(i): * second_mom[i, j] += state[i] * state[j] * prob # <<<<<<<<<<<<<< * next_state(state, state_index + 1) * */ __pyx_t_13 = __pyx_v_i; __pyx_t_14 = __pyx_v_j; __pyx_t_15 = __pyx_v_i; __pyx_t_16 = __pyx_v_j; *((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_mom.data + __pyx_t_15 * __pyx_v_second_mom.strides[0]) ) + __pyx_t_16 * __pyx_v_second_mom.strides[1]) )) += (((*((__pyx_t_7bmtools_5exact_7helpers_state_t *) ( /* dim=0 */ (__pyx_v_state.data + __pyx_t_13 * __pyx_v_state.strides[0]) ))) * (*((__pyx_t_7bmtools_5exact_7helpers_state_t *) ( /* dim=0 */ (__pyx_v_state.data + __pyx_t_14 * __pyx_v_state.strides[0]) )))) * __pyx_v_prob); } } /* "bmtools/exact/moments.pyx":300 * for j in range(i): * second_mom[i, j] += state[i] * state[j] * prob * next_state(state, state_index + 1) # <<<<<<<<<<<<<< * * */ __pyx_f_7bmtools_5exact_7helpers_next_state(__pyx_v_state, (__pyx_v_state_index + 1)); } /* "bmtools/exact/moments.pyx":277 * * * cdef void accum_moments_for_state_range( # <<<<<<<<<<<<<< * double[:, :] weights, double[:] biases, state_t[:] state, * double* norm_const, double[:] first_mom, double[:, :] second_mom, */ /* function exit code */ } /* "bmtools/exact/moments.pyx":303 * * * cdef double calc_norm_const(double[:,:] weights, 
double[:] biases, # <<<<<<<<<<<<<< * state_t[:] state, long start_state_index=0, * long end_state_index=-1) nogil: */ static double __pyx_f_7bmtools_5exact_7moments_calc_norm_const(__Pyx_memviewslice __pyx_v_weights, __Pyx_memviewslice __pyx_v_biases, __Pyx_memviewslice __pyx_v_state, struct __pyx_opt_args_7bmtools_5exact_7moments_calc_norm_const *__pyx_optional_args) { long __pyx_v_start_state_index = ((long)0); long __pyx_v_end_state_index = ((long)-1L); double __pyx_v_norm_const; long __pyx_v_state_index; double __pyx_r; int __pyx_t_1; long __pyx_t_2; long __pyx_t_3; if (__pyx_optional_args) { if (__pyx_optional_args->__pyx_n > 0) { __pyx_v_start_state_index = __pyx_optional_args->start_state_index; if (__pyx_optional_args->__pyx_n > 1) { __pyx_v_end_state_index = __pyx_optional_args->end_state_index; } } } /* "bmtools/exact/moments.pyx":311 * including probabilities of states with indices in specified range. * """ * cdef double norm_const = 0. # <<<<<<<<<<<<<< * cdef long state_index * index_to_state(start_state_index, state) */ __pyx_v_norm_const = 0.; /* "bmtools/exact/moments.pyx":313 * cdef double norm_const = 0. 
* cdef long state_index * index_to_state(start_state_index, state) # <<<<<<<<<<<<<< * if end_state_index == -1: * end_state_index = 2**weights.shape[0] */ __pyx_f_7bmtools_5exact_7helpers_index_to_state(__pyx_v_start_state_index, __pyx_v_state, 0); /* "bmtools/exact/moments.pyx":314 * cdef long state_index * index_to_state(start_state_index, state) * if end_state_index == -1: # <<<<<<<<<<<<<< * end_state_index = 2**weights.shape[0] * for state_index in range(start_state_index, end_state_index): */ __pyx_t_1 = ((__pyx_v_end_state_index == -1L) != 0); if (__pyx_t_1) { /* "bmtools/exact/moments.pyx":315 * index_to_state(start_state_index, state) * if end_state_index == -1: * end_state_index = 2**weights.shape[0] # <<<<<<<<<<<<<< * for state_index in range(start_state_index, end_state_index): * norm_const += exp(neg_energy(state, weights, biases)) */ __pyx_v_end_state_index = __Pyx_pow_Py_ssize_t(2, (__pyx_v_weights.shape[0])); /* "bmtools/exact/moments.pyx":314 * cdef long state_index * index_to_state(start_state_index, state) * if end_state_index == -1: # <<<<<<<<<<<<<< * end_state_index = 2**weights.shape[0] * for state_index in range(start_state_index, end_state_index): */ } /* "bmtools/exact/moments.pyx":316 * if end_state_index == -1: * end_state_index = 2**weights.shape[0] * for state_index in range(start_state_index, end_state_index): # <<<<<<<<<<<<<< * norm_const += exp(neg_energy(state, weights, biases)) * next_state(state, state_index+1) */ __pyx_t_2 = __pyx_v_end_state_index; for (__pyx_t_3 = __pyx_v_start_state_index; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_state_index = __pyx_t_3; /* "bmtools/exact/moments.pyx":317 * end_state_index = 2**weights.shape[0] * for state_index in range(start_state_index, end_state_index): * norm_const += exp(neg_energy(state, weights, biases)) # <<<<<<<<<<<<<< * next_state(state, state_index+1) * return norm_const */ __pyx_v_norm_const = (__pyx_v_norm_const + 
exp(__pyx_f_7bmtools_5exact_7helpers_neg_energy(__pyx_v_state, __pyx_v_weights, __pyx_v_biases))); /* "bmtools/exact/moments.pyx":318 * for state_index in range(start_state_index, end_state_index): * norm_const += exp(neg_energy(state, weights, biases)) * next_state(state, state_index+1) # <<<<<<<<<<<<<< * return norm_const * */ __pyx_f_7bmtools_5exact_7helpers_next_state(__pyx_v_state, (__pyx_v_state_index + 1)); } /* "bmtools/exact/moments.pyx":319 * norm_const += exp(neg_energy(state, weights, biases)) * next_state(state, state_index+1) * return norm_const # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_norm_const; goto __pyx_L0; /* "bmtools/exact/moments.pyx":303 * * * cdef double calc_norm_const(double[:,:] weights, double[:] biases, # <<<<<<<<<<<<<< * state_t[:] state, long start_state_index=0, * long end_state_index=-1) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "bmtools/exact/moments.pyx":322 * * * cdef void normalise_first_moment( # <<<<<<<<<<<<<< * double[:] first_mom, double norm_const) nogil: * """ */ static void __pyx_f_7bmtools_5exact_7moments_normalise_first_moment(__Pyx_memviewslice __pyx_v_first_mom, double __pyx_v_norm_const) { int __pyx_v_i; Py_ssize_t __pyx_t_1; int __pyx_t_2; Py_ssize_t __pyx_t_3; /* "bmtools/exact/moments.pyx":328 * """ * cdef int i * for i in range(first_mom.shape[0]): # <<<<<<<<<<<<<< * first_mom[i] /= norm_const * */ __pyx_t_1 = (__pyx_v_first_mom.shape[0]); for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "bmtools/exact/moments.pyx":329 * cdef int i * for i in range(first_mom.shape[0]): * first_mom[i] /= norm_const # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_v_i; *((double *) ( /* dim=0 */ (__pyx_v_first_mom.data + __pyx_t_3 * __pyx_v_first_mom.strides[0]) )) /= __pyx_v_norm_const; } /* "bmtools/exact/moments.pyx":322 * * * cdef void normalise_first_moment( # <<<<<<<<<<<<<< * double[:] first_mom, double norm_const) nogil: * """ */ /* function exit code */ } /* 
"bmtools/exact/moments.pyx":332 * * * cdef void combine_and_normalise_first_moments( # <<<<<<<<<<<<<< * double[:, :] first_moms, double norm_const) nogil: * """ */ static void __pyx_f_7bmtools_5exact_7moments_combine_and_normalise_first_moments(__Pyx_memviewslice __pyx_v_first_moms, double __pyx_v_norm_const) { int __pyx_v_i; int __pyx_v_j; Py_ssize_t __pyx_t_1; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; Py_ssize_t __pyx_t_7; Py_ssize_t __pyx_t_8; int __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; /* "bmtools/exact/moments.pyx":340 * """ * cdef int i, j * for i in range(1, first_moms.shape[0]): # <<<<<<<<<<<<<< * for j in range(first_moms.shape[1]): * first_moms[0, j] += first_moms[i, j] */ __pyx_t_1 = (__pyx_v_first_moms.shape[0]); for (__pyx_t_2 = 1; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "bmtools/exact/moments.pyx":341 * cdef int i, j * for i in range(1, first_moms.shape[0]): * for j in range(first_moms.shape[1]): # <<<<<<<<<<<<<< * first_moms[0, j] += first_moms[i, j] * if i == first_moms.shape[0] - 1: */ __pyx_t_3 = (__pyx_v_first_moms.shape[1]); for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_j = __pyx_t_4; /* "bmtools/exact/moments.pyx":342 * for i in range(1, first_moms.shape[0]): * for j in range(first_moms.shape[1]): * first_moms[0, j] += first_moms[i, j] # <<<<<<<<<<<<<< * if i == first_moms.shape[0] - 1: * first_moms[0, j] /= norm_const */ __pyx_t_5 = __pyx_v_i; __pyx_t_6 = __pyx_v_j; __pyx_t_7 = 0; __pyx_t_8 = __pyx_v_j; *((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_first_moms.data + __pyx_t_7 * __pyx_v_first_moms.strides[0]) ) + __pyx_t_8 * __pyx_v_first_moms.strides[1]) )) += (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_first_moms.data + __pyx_t_5 * __pyx_v_first_moms.strides[0]) ) + __pyx_t_6 * __pyx_v_first_moms.strides[1]) ))); /* "bmtools/exact/moments.pyx":343 * for j in range(first_moms.shape[1]): * first_moms[0, j] += 
first_moms[i, j] * if i == first_moms.shape[0] - 1: # <<<<<<<<<<<<<< * first_moms[0, j] /= norm_const * */ __pyx_t_9 = ((__pyx_v_i == ((__pyx_v_first_moms.shape[0]) - 1)) != 0); if (__pyx_t_9) { /* "bmtools/exact/moments.pyx":344 * first_moms[0, j] += first_moms[i, j] * if i == first_moms.shape[0] - 1: * first_moms[0, j] /= norm_const # <<<<<<<<<<<<<< * * */ __pyx_t_10 = 0; __pyx_t_11 = __pyx_v_j; *((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_first_moms.data + __pyx_t_10 * __pyx_v_first_moms.strides[0]) ) + __pyx_t_11 * __pyx_v_first_moms.strides[1]) )) /= __pyx_v_norm_const; /* "bmtools/exact/moments.pyx":343 * for j in range(first_moms.shape[1]): * first_moms[0, j] += first_moms[i, j] * if i == first_moms.shape[0] - 1: # <<<<<<<<<<<<<< * first_moms[0, j] /= norm_const * */ } } } /* "bmtools/exact/moments.pyx":332 * * * cdef void combine_and_normalise_first_moments( # <<<<<<<<<<<<<< * double[:, :] first_moms, double norm_const) nogil: * """ */ /* function exit code */ } /* "bmtools/exact/moments.pyx":347 * * * cdef void normalise_and_reflect_second_moment(double[:, :] second_mom, # <<<<<<<<<<<<<< * double norm_const) nogil: * """ */ static void __pyx_f_7bmtools_5exact_7moments_normalise_and_reflect_second_moment(__Pyx_memviewslice __pyx_v_second_mom, double __pyx_v_norm_const) { int __pyx_v_i; int __pyx_v_j; Py_ssize_t __pyx_t_1; int __pyx_t_2; Py_ssize_t __pyx_t_3; Py_ssize_t __pyx_t_4; int __pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; Py_ssize_t __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; Py_ssize_t __pyx_t_12; /* "bmtools/exact/moments.pyx":356 * """ * cdef int i, j * for i in range(second_mom.shape[0]): # <<<<<<<<<<<<<< * second_mom[i, i] = 1. * for j in range(i): */ __pyx_t_1 = (__pyx_v_second_mom.shape[0]); for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "bmtools/exact/moments.pyx":357 * cdef int i, j * for i in range(second_mom.shape[0]): * second_mom[i, i] = 1. 
# <<<<<<<<<<<<<< * for j in range(i): * second_mom[i, j] /= norm_const */ __pyx_t_3 = __pyx_v_i; __pyx_t_4 = __pyx_v_i; *((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_mom.data + __pyx_t_3 * __pyx_v_second_mom.strides[0]) ) + __pyx_t_4 * __pyx_v_second_mom.strides[1]) )) = 1.; /* "bmtools/exact/moments.pyx":358 * for i in range(second_mom.shape[0]): * second_mom[i, i] = 1. * for j in range(i): # <<<<<<<<<<<<<< * second_mom[i, j] /= norm_const * second_mom[j, i] = second_mom[i, j] */ __pyx_t_5 = __pyx_v_i; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_j = __pyx_t_6; /* "bmtools/exact/moments.pyx":359 * second_mom[i, i] = 1. * for j in range(i): * second_mom[i, j] /= norm_const # <<<<<<<<<<<<<< * second_mom[j, i] = second_mom[i, j] * */ __pyx_t_7 = __pyx_v_i; __pyx_t_8 = __pyx_v_j; *((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_mom.data + __pyx_t_7 * __pyx_v_second_mom.strides[0]) ) + __pyx_t_8 * __pyx_v_second_mom.strides[1]) )) /= __pyx_v_norm_const; /* "bmtools/exact/moments.pyx":360 * for j in range(i): * second_mom[i, j] /= norm_const * second_mom[j, i] = second_mom[i, j] # <<<<<<<<<<<<<< * * */ __pyx_t_9 = __pyx_v_i; __pyx_t_10 = __pyx_v_j; __pyx_t_11 = __pyx_v_j; __pyx_t_12 = __pyx_v_i; *((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_mom.data + __pyx_t_11 * __pyx_v_second_mom.strides[0]) ) + __pyx_t_12 * __pyx_v_second_mom.strides[1]) )) = (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_mom.data + __pyx_t_9 * __pyx_v_second_mom.strides[0]) ) + __pyx_t_10 * __pyx_v_second_mom.strides[1]) ))); } } /* "bmtools/exact/moments.pyx":347 * * * cdef void normalise_and_reflect_second_moment(double[:, :] second_mom, # <<<<<<<<<<<<<< * double norm_const) nogil: * """ */ /* function exit code */ } /* "bmtools/exact/moments.pyx":363 * * * cdef void combine_normalise_and_reflect_second_moments( # <<<<<<<<<<<<<< * double[:, :, :] second_moms, double norm_const) nogil: * """ */ static void 
__pyx_f_7bmtools_5exact_7moments_combine_normalise_and_reflect_second_moments(__Pyx_memviewslice __pyx_v_second_moms, double __pyx_v_norm_const) { int __pyx_v_i; int __pyx_v_j; int __pyx_v_k; Py_ssize_t __pyx_t_1; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; Py_ssize_t __pyx_t_7; int __pyx_t_8; int __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; Py_ssize_t __pyx_t_14; Py_ssize_t __pyx_t_15; int __pyx_t_16; Py_ssize_t __pyx_t_17; Py_ssize_t __pyx_t_18; Py_ssize_t __pyx_t_19; Py_ssize_t __pyx_t_20; Py_ssize_t __pyx_t_21; Py_ssize_t __pyx_t_22; Py_ssize_t __pyx_t_23; Py_ssize_t __pyx_t_24; Py_ssize_t __pyx_t_25; /* "bmtools/exact/moments.pyx":373 * """ * cdef int i, j, k * for i in range(1, second_moms.shape[0]): # <<<<<<<<<<<<<< * for j in range(second_moms.shape[1]): * second_moms[0, j, j] = 1. */ __pyx_t_1 = (__pyx_v_second_moms.shape[0]); for (__pyx_t_2 = 1; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "bmtools/exact/moments.pyx":374 * cdef int i, j, k * for i in range(1, second_moms.shape[0]): * for j in range(second_moms.shape[1]): # <<<<<<<<<<<<<< * second_moms[0, j, j] = 1. * for k in range(j): */ __pyx_t_3 = (__pyx_v_second_moms.shape[1]); for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_j = __pyx_t_4; /* "bmtools/exact/moments.pyx":375 * for i in range(1, second_moms.shape[0]): * for j in range(second_moms.shape[1]): * second_moms[0, j, j] = 1. 
# <<<<<<<<<<<<<< * for k in range(j): * second_moms[0, j, k] += second_moms[i, j, k] */ __pyx_t_5 = 0; __pyx_t_6 = __pyx_v_j; __pyx_t_7 = __pyx_v_j; *((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_moms.data + __pyx_t_5 * __pyx_v_second_moms.strides[0]) ) + __pyx_t_6 * __pyx_v_second_moms.strides[1]) ) + __pyx_t_7 * __pyx_v_second_moms.strides[2]) )) = 1.; /* "bmtools/exact/moments.pyx":376 * for j in range(second_moms.shape[1]): * second_moms[0, j, j] = 1. * for k in range(j): # <<<<<<<<<<<<<< * second_moms[0, j, k] += second_moms[i, j, k] * if i == second_moms.shape[0] - 1: */ __pyx_t_8 = __pyx_v_j; for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { __pyx_v_k = __pyx_t_9; /* "bmtools/exact/moments.pyx":377 * second_moms[0, j, j] = 1. * for k in range(j): * second_moms[0, j, k] += second_moms[i, j, k] # <<<<<<<<<<<<<< * if i == second_moms.shape[0] - 1: * second_moms[0, j, k] /= norm_const */ __pyx_t_10 = __pyx_v_i; __pyx_t_11 = __pyx_v_j; __pyx_t_12 = __pyx_v_k; __pyx_t_13 = 0; __pyx_t_14 = __pyx_v_j; __pyx_t_15 = __pyx_v_k; *((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_moms.data + __pyx_t_13 * __pyx_v_second_moms.strides[0]) ) + __pyx_t_14 * __pyx_v_second_moms.strides[1]) ) + __pyx_t_15 * __pyx_v_second_moms.strides[2]) )) += (*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_moms.data + __pyx_t_10 * __pyx_v_second_moms.strides[0]) ) + __pyx_t_11 * __pyx_v_second_moms.strides[1]) ) + __pyx_t_12 * __pyx_v_second_moms.strides[2]) ))); /* "bmtools/exact/moments.pyx":378 * for k in range(j): * second_moms[0, j, k] += second_moms[i, j, k] * if i == second_moms.shape[0] - 1: # <<<<<<<<<<<<<< * second_moms[0, j, k] /= norm_const * second_moms[0, k, j] = second_moms[0, j, k] */ __pyx_t_16 = ((__pyx_v_i == ((__pyx_v_second_moms.shape[0]) - 1)) != 0); if (__pyx_t_16) { /* "bmtools/exact/moments.pyx":379 * second_moms[0, j, k] += second_moms[i, j, k] * if i == second_moms.shape[0] - 1: * 
second_moms[0, j, k] /= norm_const # <<<<<<<<<<<<<< * second_moms[0, k, j] = second_moms[0, j, k] */ __pyx_t_17 = 0; __pyx_t_18 = __pyx_v_j; __pyx_t_19 = __pyx_v_k; *((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_moms.data + __pyx_t_17 * __pyx_v_second_moms.strides[0]) ) + __pyx_t_18 * __pyx_v_second_moms.strides[1]) ) + __pyx_t_19 * __pyx_v_second_moms.strides[2]) )) /= __pyx_v_norm_const; /* "bmtools/exact/moments.pyx":380 * if i == second_moms.shape[0] - 1: * second_moms[0, j, k] /= norm_const * second_moms[0, k, j] = second_moms[0, j, k] # <<<<<<<<<<<<<< */ __pyx_t_20 = 0; __pyx_t_21 = __pyx_v_j; __pyx_t_22 = __pyx_v_k; __pyx_t_23 = 0; __pyx_t_24 = __pyx_v_k; __pyx_t_25 = __pyx_v_j; *((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_moms.data + __pyx_t_23 * __pyx_v_second_moms.strides[0]) ) + __pyx_t_24 * __pyx_v_second_moms.strides[1]) ) + __pyx_t_25 * __pyx_v_second_moms.strides[2]) )) = (*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_second_moms.data + __pyx_t_20 * __pyx_v_second_moms.strides[0]) ) + __pyx_t_21 * __pyx_v_second_moms.strides[1]) ) + __pyx_t_22 * __pyx_v_second_moms.strides[2]) ))); /* "bmtools/exact/moments.pyx":378 * for k in range(j): * second_moms[0, j, k] += second_moms[i, j, k] * if i == second_moms.shape[0] - 1: # <<<<<<<<<<<<<< * second_moms[0, j, k] /= norm_const * second_moms[0, k, j] = second_moms[0, j, k] */ } } } } /* "bmtools/exact/moments.pyx":363 * * * cdef void combine_normalise_and_reflect_second_moments( # <<<<<<<<<<<<<< * double[:, :, :] second_moms, double norm_const) nogil: * """ */ /* function exit code */ } /* "View.MemoryView":120 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int 
__pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 120, __pyx_L3_error) } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 120, __pyx_L3_error) } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) 
__PYX_ERR(1, 120, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 120, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 121, __pyx_L3_error) } else { /* "View.MemoryView":121 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 120, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 120, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 120, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":120 * cdef bint dtype_is_object * * def 
__cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":127 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 127, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(1, 127, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":128 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":130 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":131 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(1, 131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 131, __pyx_L1_error) /* "View.MemoryView":130 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":133 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":134 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 134, __pyx_L1_error) /* "View.MemoryView":133 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":136 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":137 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 
137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":136 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":138 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 138, __pyx_L1_error) __pyx_t_5 = __pyx_v_format; __Pyx_INCREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":139 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ __pyx_t_6 = __Pyx_PyObject_AsString(__pyx_v_self->_format); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(1, 139, __pyx_L1_error) __pyx_v_self->format = __pyx_t_6; /* "View.MemoryView":142 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":143 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":145 * self._strides = self._shape + self.ndim * * if not 
self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":146 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 146, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 146, __pyx_L1_error) /* "View.MemoryView":145 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":149 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) */ __pyx_t_7 = 0; __pyx_t_5 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_5); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_5)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_1); __Pyx_INCREF(__pyx_t_3); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 149, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_5, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 149, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __pyx_t_8 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_8 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 149, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_dim = __pyx_t_8; __pyx_v_idx = __pyx_t_7; __pyx_t_7 = (__pyx_t_7 + 1); /* "View.MemoryView":150 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (__pyx_t_4) { /* "View.MemoryView":151 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_9); __pyx_t_3 = 0; __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_10, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_Raise(__pyx_t_9, 0, 0, 0); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __PYX_ERR(1, 151, __pyx_L1_error) /* "View.MemoryView":150 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":152 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":149 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) */ } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":155 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 155, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":156 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":157 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":155 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":158 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 158, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":159 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":160 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); 
__Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":158 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":162 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_5 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 162, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 162, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 162, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 162, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":164 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":167 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":168 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_5 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if 
(unlikely(!__pyx_t_5)) __PYX_ERR(1, 168, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 168, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":169 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":172 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":173 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":174 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 174, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 174, __pyx_L1_error) /* "View.MemoryView":173 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":176 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":177 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = 
((PyObject **)__pyx_v_self->data); /* "View.MemoryView":178 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 178, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 178, __pyx_L1_error) } __pyx_t_1 = (__pyx_v_self->len / __pyx_v_itemsize); for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_1; __pyx_t_8+=1) { __pyx_v_i = __pyx_t_8; /* "View.MemoryView":179 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":180 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":176 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":169 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":120 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; 
__Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":183 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "View.MemoryView":184 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":185 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 185, __pyx_L1_error) 
__pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":186 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":185 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":187 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":188 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":189 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * 
bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 190, __pyx_L1_error) /* "View.MemoryView":189 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":191 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":192 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":193 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":194 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":195 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = 
__pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":196 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":197 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; __pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":198 * info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":200 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":201 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":200 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":203 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":205 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":183 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto 
__pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":209 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":210 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":211 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":210 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # 
<<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ goto __pyx_L3; } /* "View.MemoryView":212 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":214 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":213 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":216 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":212 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":217 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":209 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # 
<<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":220 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":221 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 221, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":220 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":224 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ 
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_memview", 0); /* "View.MemoryView":225 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":226 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":224 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ 
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":229 * * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":230 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 230, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 230, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":229 * * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; 
__Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":232 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":233 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 233, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 233, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":232 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; 
__Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":235 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":236 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 236, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 236, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":235 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); 
__Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":240 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":244 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":245 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); 
__Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":244 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":247 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":248 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = PyDict_New(); 
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 248, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 248, __pyx_L1_error) /* "View.MemoryView":247 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":249 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":251 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":240 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":277 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject 
*__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 277, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 277, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":278 * cdef object name 
* def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":277 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":279 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":280 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":279 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":294 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< 
* "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":296 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":300 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":302 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":303 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":302 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":305 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = ((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":294 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":341 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int 
__pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 341, __pyx_L3_error) } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 341, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 341, __pyx_L3_error) if (values[2]) { 
__pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 341, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 341, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":342 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":343 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":344 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * 
__Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":345 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 345, __pyx_L1_error) /* "View.MemoryView":346 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":347 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":348 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":346 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":344 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # 
<<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":351 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":352 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":353 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":351 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":354 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":355 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if 
self.lock is NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":356 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":357 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(1, 357, __pyx_L1_error) /* "View.MemoryView":356 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":354 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":359 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":360 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":359 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":362 * self.dtype_is_object = (self.view.format[0] == b'O' and 
self.view.format[1] == b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":364 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":366 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":341 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":368 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static 
void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; PyThread_type_lock __pyx_t_5; PyThread_type_lock __pyx_t_6; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":369 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":370 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * * cdef int i */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":369 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ } /* "View.MemoryView":374 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":375 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":376 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":377 
* for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); /* "View.MemoryView":378 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":380 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_5 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":379 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_5; (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_6; /* "View.MemoryView":378 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # 
<<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":381 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":376 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":383 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":374 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":368 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":385 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; 
PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":387 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":389 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 389, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 389, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 389, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 389, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 389, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":390 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 390, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == NULL)) __PYX_ERR(1, 390, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":389 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":392 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":385 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":395 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if 
index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":396 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":397 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":396 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":399 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(1, 399, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; #if !CYTHON_COMPILING_IN_PYPY Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 399, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 399, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 399, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 399, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":402 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 402, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":403 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":402 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":405 * return memview_slice(self, indices) * else: * 
itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == NULL)) __PYX_ERR(1, 405, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* "View.MemoryView":406 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 406, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":395 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":408 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * have_slices, index = _unellipsify(index, self.view.ndim) * */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":409 * * def __setitem__(memoryview self, object index, object value): * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_1 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 409, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (likely(__pyx_t_1 != Py_None)) { PyObject* sequence = __pyx_t_1; #if !CYTHON_COMPILING_IN_PYPY Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 409, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); #else __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 409, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 409, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 409, 
__pyx_L1_error) } __pyx_v_have_slices = __pyx_t_2; __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":411 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 411, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":412 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 412, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_obj = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":413 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 413, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":414 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_1, __pyx_v_obj); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":413 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L4; } /* "View.MemoryView":416 * 
self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_3 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 416, __pyx_L1_error) __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_3), __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_L4:; /* "View.MemoryView":411 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L3; } /* "View.MemoryView":418 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_L3:; /* "View.MemoryView":408 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * have_slices, index = _unellipsify(index, self.view.ndim) * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; 
__Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":420 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":421 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":422 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":423 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int((__pyx_v_self->flags | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 423, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":424 * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = 
__Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 424, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":423 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 423, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 423, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":422 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L11_try_end; __pyx_L4_error:; __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":425 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 425, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); 
__Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":426 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":422 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L11_try_end:; } /* "View.MemoryView":421 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":428 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto __pyx_L0; /* "View.MemoryView":420 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":430 * return obj * * cdef setitem_slice_assignment(self, 
dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":434 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 434, __pyx_L1_error) /* "View.MemoryView":435 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 435, __pyx_L1_error) /* "View.MemoryView":436 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 436, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 436, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 436, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); 
__pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 436, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":434 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ __pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 434, __pyx_L1_error) /* "View.MemoryView":430 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":438 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; char const 
*__pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":440 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":445 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); /* "View.MemoryView":447 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_1) { /* "View.MemoryView":448 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":449 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_1 = ((__pyx_v_tmp == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":450 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(1, 450, __pyx_L1_error) /* "View.MemoryView":449 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ } /* "View.MemoryView":451 * if tmp == NULL: * raise MemoryError 
* item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":447 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":453 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":455 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":456 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":457 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":456 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":459 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 459, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L8:; /* "View.MemoryView":463 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if 
(__pyx_t_1) { /* "View.MemoryView":464 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 464, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":463 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":465 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":468 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } /*exception exit:*/{ __Pyx_PyThreadState_declare __pyx_L6_error:; __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __pyx_t_3 = __pyx_lineno; __pyx_t_4 = 
__pyx_clineno; __pyx_t_5 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } __Pyx_PyThreadState_assign if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_9); __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11); } __Pyx_XGIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":438 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":470 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":471 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if 
(unlikely(__pyx_t_1 == NULL)) __PYX_ERR(1, 471, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":472 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 472, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":470 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":474 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* 
"View.MemoryView":477 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 477, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":480 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 480, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":481 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":482 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 482, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 482, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); 
__pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 482, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 482, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 482, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 482, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":481 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":486 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if 
(__pyx_t_11) { /* "View.MemoryView":487 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 487, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":486 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":488 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":483 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 483, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9) < 0) __PYX_ERR(1, 483, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_9); /* "View.MemoryView":484 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * 
raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 484, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 484, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":481 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":474 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":490 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static 
PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":493 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":498 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":499 * * if isinstance(value, tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; 
__pyx_t_4 = PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 499, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":498 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":501 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = 
__Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 501, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":503 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); __PYX_ERR(1, 503, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_10 = __pyx_v_bytesvalue; 
__pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { __pyx_t_11 = __pyx_t_14; __pyx_v_c = (__pyx_t_11[0]); /* "View.MemoryView":504 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_9; /* "View.MemoryView":503 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = (__pyx_t_9 + 1); /* "View.MemoryView":504 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "View.MemoryView":490 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":507 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_STRIDES: * info.shape = self.view.shape */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int 
__pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; Py_ssize_t *__pyx_t_2; char *__pyx_t_3; void *__pyx_t_4; int __pyx_t_5; Py_ssize_t __pyx_t_6; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "View.MemoryView":508 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":509 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_2 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_2; /* "View.MemoryView":508 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L3; } /* "View.MemoryView":511 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L3:; /* "View.MemoryView":513 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":514 * * if flags & 
PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_2 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_2; /* "View.MemoryView":513 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L4; } /* "View.MemoryView":516 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L4:; /* "View.MemoryView":518 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":519 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_2 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_2; /* "View.MemoryView":518 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L5; } /* "View.MemoryView":521 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L5:; /* "View.MemoryView":523 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":524 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_3 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_3; /* "View.MemoryView":523 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L6; } /* "View.MemoryView":526 * info.format = self.view.format * else: * info.format = 
NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L6:; /* "View.MemoryView":528 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_4 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":529 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_5 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_5; /* "View.MemoryView":530 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = 0 */ __pyx_t_6 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_6; /* "View.MemoryView":531 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = 0 * info.obj = self */ __pyx_t_6 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_6; /* "View.MemoryView":532 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = 0 # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_v_info->readonly = 0; /* "View.MemoryView":533 * info.len = self.view.len * info.readonly = 0 * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":507 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_STRIDES: * info.shape = self.view.shape */ /* function exit code */ __pyx_r = 0; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); 
__pyx_v_info->obj = NULL; } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":539 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":540 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 540, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 540, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":541 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == 0)) __PYX_ERR(1, 541, __pyx_L1_error) /* "View.MemoryView":542 * cdef _memoryviewslice result = 
memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":539 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":545 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":546 * @property * def base(self): * return self.obj # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":545 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* 
"View.MemoryView":549 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":550 * @property * def shape(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 550, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 
0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":549 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":553 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":554 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":556 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) 
*/ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 556, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 556, __pyx_L1_error) /* "View.MemoryView":554 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":558 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 558, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 558, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 558, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 558, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":553 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":561 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if 
self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":562 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":563 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 563, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__14, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 563, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":562 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim 
* */ } /* "View.MemoryView":565 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":561 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":568 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = 
__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* `memoryview.ndim` getter impl (Cython-generated — edit the .pyx, not this C):
   boxes self->view.ndim (the Py_buffer dimension count, a C int) into a Python int.
   Only failure mode is the int-boxing helper failing, routed through __pyx_L1_error. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":569 * @property * def ndim(self): * return self.view.ndim # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 569, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":568 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Below: Python wrapper for the `memoryview.itemsize` property; its impl function
   begins here and continues past this slice. */
/* "View.MemoryView":572 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; 
__Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":573 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 573, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":572 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":576 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":577 * @property * def nbytes(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) 
__PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":576 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":580 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":581 
* @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":582 * def size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":584 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 584, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":585 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 585, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":587 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":581 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":589 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":580 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ 
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":591 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":592 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":593 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":592 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":595 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def __repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":591 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; 
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":597 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":598 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":599 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 599, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_self)); __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_id, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":598 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":597 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":601 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":602 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 602, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 602, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 602, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 602, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 602, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":601 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, 
__pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":605 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":608 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":609 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 609, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":605 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * 
cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":611 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":614 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":615 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = 
__Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 615, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":611 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":617 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":619 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & 
(~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":621 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":622 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 622, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":627 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 627, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":617 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":629 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static 
PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":631 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":633 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":634 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 634, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":639 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, 
(&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 639, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":629 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":643 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":644 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 644, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 644, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 644, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); 
__Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 644, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":645 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":646 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":643 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":649 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject 
*__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":650 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":649 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":652 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":657 * full slices. 
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":658 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":657 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":660 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":662 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 662, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":663 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":664 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":665 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(1, 665, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 665, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 665, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 665, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 665, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 665, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 665, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 665, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":666 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 
= (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":667 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":668 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == -1)) __PYX_ERR(1, 668, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 668, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__15); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(1, 668, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":669 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":667 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":671 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__16); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(1, 671, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":672 * else: * 
result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":666 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":674 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":675 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_Format(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_t_11, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_Raise(__pyx_t_7, 0, 0, 0); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __PYX_ERR(1, 675, __pyx_L1_error) /* "View.MemoryView":674 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with 
type '%s'" % type(item)) * */ } /* "View.MemoryView":677 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":678 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(1, 678, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":665 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":680 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(1, 680, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":681 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":682 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__17); __Pyx_GIVEREF(__pyx_slice__17); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__17); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":681 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":684 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L0; /* "View.MemoryView":652 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ /* function 
exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":686 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":687 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":688 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_4) { /* "View.MemoryView":689 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_5)) 
__PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 689, __pyx_L1_error) /* "View.MemoryView":688 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":686 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":696 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; 
PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":697 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":704 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))); /* "View.MemoryView":708 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(1, 708, __pyx_L1_error) } } #endif /* "View.MemoryView":710 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":711 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 711, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":712 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # 
<<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":710 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":714 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":715 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":721 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":722 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":727 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":728 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":732 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 732, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 732, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 732, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 732, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 732, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 732, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 732, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":733 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":737 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * 
False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 737, __pyx_L1_error) /* "View.MemoryView":734 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == -1)) __PYX_ERR(1, 734, __pyx_L1_error) /* "View.MemoryView":733 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":740 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":741 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":742 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":743 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":744 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * 
start = index.start or 0 */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); /* "View.MemoryView":740 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ goto __pyx_L6; } /* "View.MemoryView":746 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 746, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":747 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 747, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 747, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 747, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":748 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not 
None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 748, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 748, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 748, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":750 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 750, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":751 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 751, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":752 * have_start = index.start is not None * have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 752, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":754 
* have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == -1)) __PYX_ERR(1, 754, __pyx_L1_error) /* "View.MemoryView":760 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":732 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":762 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":763 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":764 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 764, __pyx_L1_error) } /* "View.MemoryView":765 * return 
memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 765, __pyx_L1_error) } /* "View.MemoryView":763 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 763, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 763, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":762 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* "View.MemoryView":768 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":769 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 768, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":768 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) 
__PYX_ERR(1, 768, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":696 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":793 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":813 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":815 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":816 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start 
= (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":815 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":817 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":818 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(1, 818, __pyx_L1_error) /* "View.MemoryView":817 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":813 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":821 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":823 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":824 * * if have_step 
and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(1, 824, __pyx_L1_error) /* "View.MemoryView":823 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":827 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":828 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":829 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":830 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":831 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":830 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":828 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":832 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":833 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":834 * elif 
start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":833 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":836 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* "View.MemoryView":832 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":827 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":838 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":839 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":838 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":841 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":843 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":844 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":845 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":846 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 
0) != 0); if (__pyx_t_2) { /* "View.MemoryView":847 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":846 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":844 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":848 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":849 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":848 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":843 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":851 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":852 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":851 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":854 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":856 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":857 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":856 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":861 * * with 
cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":863 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":864 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":863 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":866 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":867 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":866 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":870 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":871 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":872 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":875 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 
0); if (__pyx_t_2) { /* "View.MemoryView":876 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":875 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":878 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":880 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":881 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":882 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":883 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":882 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":885 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d 
" # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":886 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(1, 885, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":881 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":888 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":880 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":890 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":793 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":896 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char 
*__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":898 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":899 * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":902 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":903 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 903, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 903, __pyx_L1_error) } __pyx_v_shape = (__pyx_v_view->len / __pyx_v_itemsize); /* "View.MemoryView":904 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; 
/* "View.MemoryView":902 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":906 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":907 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":908 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":909 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":908 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":911 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":912 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":913 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":914 * index += view.shape[dim] * if index < 0: * raise 
IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 914, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 914, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 914, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 914, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 914, __pyx_L1_error) /* "View.MemoryView":913 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":911 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":916 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":917 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 917, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 917, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 917, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 917, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 917, __pyx_L1_error) /* "View.MemoryView":916 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":919 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":920 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":921 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":920 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":923 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":896 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * 
Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":929 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; /* "View.MemoryView":930 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":932 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":933 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":937 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = (__pyx_v_ndim / 2); for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_3; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":938 * cdef int i, j * for i in 
range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":939 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_4 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_5 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_4; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":940 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_5 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_4 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_5; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_4; /* "View.MemoryView":942 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L6_bool_binop_done; } __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L6_bool_binop_done:; if (__pyx_t_6) { /* "View.MemoryView":943 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_8 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_8 == -1)) __PYX_ERR(1, 943, __pyx_L1_error) /* "View.MemoryView":942 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * 
_err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* "View.MemoryView":945 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":929 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":962 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":963 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":962 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * 
__PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":965 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":966 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":967 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 967, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":966 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":969 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 969, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":965 * 
__PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":971 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":972 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":973 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == 0)) __PYX_ERR(1, 973, __pyx_L1_error) /* "View.MemoryView":972 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":975 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, 
value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 975, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":971 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":978 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":979 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ 
__Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":978 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":985 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":993 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":994 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; /* "View.MemoryView":993 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":999 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(1, 999, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 999, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 999, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1001 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1002 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1004 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1004, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1005 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = 
__pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1007 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1008 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1009 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1010 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1011 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * result.flags = PyBUF_RECORDS */ Py_INCREF(Py_None); /* "View.MemoryView":1013 * Py_INCREF(Py_None) * * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1015 * result.flags = PyBUF_RECORDS * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1016 * * result.view.shape = <Py_ssize_t *> 
result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1019 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1020 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1021 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1022 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1023 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L5_break; /* "View.MemoryView":1021 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L5_break:; /* "View.MemoryView":1025 * break * * result.view.len = 
result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1026 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1026, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1027 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1027, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1027, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1027, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1029 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1030 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1032 * 
result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":985 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1035 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1038 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1039 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || 
likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1039, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1040 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1038 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1042 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1043 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1035 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 0, 0); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1046 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t 
*__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1050 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1051 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1052 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1054 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1055 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1057 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_dim = __pyx_t_3; /* "View.MemoryView":1058 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = 
(__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1059 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1060 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_4 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_4 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_4; } /* "View.MemoryView":1046 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1066 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1067 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ __Pyx_XDECREF(__pyx_r); 
__pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1067, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1070 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1077 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1078 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # 
<<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1079 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1077 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1081 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1082 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1084 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1086 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* 
"View.MemoryView":1070 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1092 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1093 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1094 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1093 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1096 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1092 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1099 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
  int __pyx_v_i;
  Py_ssize_t __pyx_v_c_stride;
  Py_ssize_t __pyx_v_f_stride;
  char __pyx_r;
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;

  /* 1104-1105: c_stride = f_stride = 0 */
  __pyx_v_c_stride = 0;
  __pyx_v_f_stride = 0;

  /* 1107: scan from the last dimension down for the C-order innermost stride */
  for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1L; __pyx_t_1-=1) {
    __pyx_v_i = __pyx_t_1;
    /* 1108-1110: first dim with shape[i] > 1 supplies c_stride, then break */
    __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
    if (__pyx_t_2) {
      __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
      goto __pyx_L4_break;
    }
  }
  __pyx_L4_break:;

  /* 1112: scan from the first dimension up for the Fortran-order stride */
  __pyx_t_1 = __pyx_v_ndim;
  for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_1; __pyx_t_3+=1) {
    __pyx_v_i = __pyx_t_3;
    /* 1113-1115: first dim with shape[i] > 1 supplies f_stride, then break */
    __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
    if (__pyx_t_2) {
      __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
      goto __pyx_L7_break;
    }
  }
  __pyx_L7_break:;

  /* 1117-1118: smaller |innermost stride| wins; ties (<=) prefer 'C' */
  __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0);
  if (__pyx_t_2) {
    __pyx_r = 'C';
    goto __pyx_L0;
  }

  /* 1120: else: return 'F' */
  /*else*/ {
    __pyx_r = 'F';
    goto __pyx_L0;
  }

  /* "View.MemoryView":1099
   *   cdef char get_best_order(...) nogil — exit code follows
*/

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "View.MemoryView":1123
 *   @cython.cdivision(True)
 *   cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides,
 *                                      char *dst_data, Py_ssize_t *dst_strides,
 *                                      Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
 *                                      int ndim, size_t itemsize)
 * Recursive element-wise copy between two strided buffers. The 1-D base
 * case collapses to a single memcpy when both strides equal itemsize and
 * are positive; otherwise it copies item by item along both strides. */
static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
  CYTHON_UNUSED Py_ssize_t __pyx_v_i;
  CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent;
  Py_ssize_t __pyx_v_dst_extent;
  Py_ssize_t __pyx_v_src_stride;
  Py_ssize_t __pyx_v_dst_stride;
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  Py_ssize_t __pyx_t_5;

  /* 1130-1133: cache the leading extent and stride of each slice */
  __pyx_v_src_extent = (__pyx_v_src_shape[0]);
  __pyx_v_dst_extent = (__pyx_v_dst_shape[0]);
  __pyx_v_src_stride = (__pyx_v_src_strides[0]);
  __pyx_v_dst_stride = (__pyx_v_dst_strides[0]);

  /* 1135: if ndim == 1: */
  __pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
  if (__pyx_t_1) {
    /* 1136: src_stride > 0 and dst_stride > 0 and ... (short-circuit chain) */
    __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0);
    if (__pyx_t_2) {
    } else {
      __pyx_t_1 = __pyx_t_2;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0);
    if (__pyx_t_2) {
    } else {
      __pyx_t_1 = __pyx_t_2;
      goto __pyx_L5_bool_binop_done;
    }
    /* 1137: <size_t> src_stride == itemsize == <size_t> dst_stride */
    __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize);
    if (__pyx_t_2) {
      __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride));
    }
    __pyx_t_3 = (__pyx_t_2 != 0);
    __pyx_t_1 = __pyx_t_3;
    __pyx_L5_bool_binop_done:;
    if (__pyx_t_1) {
      /* 1138: contiguous 1-D case — one memcpy of the whole extent */
      memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent));
      goto __pyx_L4;
    }
    /* 1140-1143: else copy per element, advancing both stride pointers */
    /*else*/ {
      __pyx_t_4 = __pyx_v_dst_extent;
      for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
        __pyx_v_i = __pyx_t_5;
        memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize);
        __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
        __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
      }
    }
    __pyx_L4:;
    goto __pyx_L3;
  }

  /* 1145-1146: ndim > 1 — recurse into the next dimension per outer element */
  /*else*/ {
    __pyx_t_4 = __pyx_v_dst_extent;
    for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
      __pyx_v_i = __pyx_t_5;
      _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize);
      /* 1150-1151: advance the outer pointers — src_data += src_stride,
dst_data += dst_stride */
      __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
      __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
    }
  }
  __pyx_L3:;

  /* function exit code */
}

/* "View.MemoryView":1153
 *   cdef void copy_strided_to_strided(__Pyx_memviewslice *src,
 *                                     __Pyx_memviewslice *dst,
 *                                     int ndim, size_t itemsize) nogil:
 * Thin wrapper: unpacks both slice structs and calls the recursive worker. */
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {

  /* 1156 */
  _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize);

  /* function exit code */
}

/* "View.MemoryView":1160
 *   cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil:
 * "Return the size of the memory occupied by the slice in number of bytes"
 * (itemsize times the product of the shape; body continues below). */
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
  int __pyx_v_i;
  Py_ssize_t __pyx_v_size;
  Py_ssize_t
__pyx_r;
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;

  /* 1163: size = src.memview.view.itemsize */
  __pyx_t_1 = __pyx_v_src->memview->view.itemsize;
  __pyx_v_size = __pyx_t_1;

  /* 1165-1166: size *= src.shape[i] for every dimension */
  __pyx_t_2 = __pyx_v_ndim;
  for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
    __pyx_v_i = __pyx_t_3;
    __pyx_v_size = (__pyx_v_size * (__pyx_v_src->shape[__pyx_v_i]));
  }

  /* 1168: return size */
  __pyx_r = __pyx_v_size;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "View.MemoryView":1171
 *   cdef Py_ssize_t fill_contig_strides_array(
 *       Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
 *       int ndim, char order) nogil:
 * Fill `strides` with contiguous strides for `shape`: ascending-dimension
 * accumulation for 'F' order, descending for any other order ('C').
 * Returns the accumulated total stride. */
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
  int __pyx_v_idx;
  Py_ssize_t __pyx_r;
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;

  /* 1180: if order == 'F': */
  __pyx_t_1 = ((__pyx_v_order == 'F') != 0);
  if (__pyx_t_1) {
    /* 1181-1183: forward pass — strides[idx] = stride; stride *= shape[idx] */
    __pyx_t_2 = __pyx_v_ndim;
    for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
      __pyx_v_idx = __pyx_t_3;
      (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
      __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
    }
    goto __pyx_L3;
  }

  /* 1185-1187: else — same accumulation from the last dimension down */
  /*else*/ {
    for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1L; __pyx_t_2-=1) {
      __pyx_v_idx = __pyx_t_2;
      (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
      __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
    }
  }
  __pyx_L3:;

  /* 1189: return stride */
  __pyx_r = __pyx_v_stride;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/*
"View.MemoryView":1192
 *   cdef void *copy_data_to_temp(__Pyx_memviewslice *src,
 *                                __Pyx_memviewslice *tmpslice,
 *                                char order, int ndim)
 * malloc() a contiguous buffer large enough for `src`, describe it in
 * `tmpslice`, and copy the source data in. Returns the buffer, or NULL
 * after raising MemoryError. */
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
  int __pyx_v_i;
  void *__pyx_v_result;
  size_t __pyx_v_itemsize;
  size_t __pyx_v_size;
  void *__pyx_r;
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  struct __pyx_memoryview_obj *__pyx_t_4;
  int __pyx_t_5;

  /* 1203-1204: itemsize and total byte size of the source slice */
  __pyx_t_1 = __pyx_v_src->memview->view.itemsize;
  __pyx_v_itemsize = __pyx_t_1;
  __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);

  /* 1206-1208: allocate; raise MemoryError (via _err) on failure */
  __pyx_v_result = malloc(__pyx_v_size);
  __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
  if (__pyx_t_2) {
    __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(1, 1208, __pyx_L1_error)
  }

  /* 1211: point the temp slice at the new buffer */
  __pyx_v_tmpslice->data = ((char *)__pyx_v_result);

  /* 1212-1215: share the memview, copy the shape, and mark every
     suboffset -1 (direct access only) */
  __pyx_t_4 = __pyx_v_src->memview;
  __pyx_v_tmpslice->memview = __pyx_t_4;
  __pyx_t_3 = __pyx_v_ndim;
  for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) {
    __pyx_v_i = __pyx_t_5;
    (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
    (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L;
  }

  /* 1217: contiguous strides for the requested order */
  __pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order);

  /* 1221-1223: zero the stride of any length-1 dimension */
  __pyx_t_3 = __pyx_v_ndim;
  for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) {
    __pyx_v_i = __pyx_t_5;
    __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
    if (__pyx_t_2) {
      /* 1223: tmpslice.strides[i] = 0 — assignment is on the next
         generated line
ndim): */
      (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
    }
  }

  /* 1225-1228: one memcpy if src is already contiguous in `order`,
     otherwise the generic strided copy */
  __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0);
  if (__pyx_t_2) {
    memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size);
    goto __pyx_L9;
  }
  /*else*/ {
    copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
  }
  __pyx_L9:;

  /* 1230: return result */
  __pyx_r = __pyx_v_result;
  goto __pyx_L0;

  /* function exit code: take the GIL to record the traceback, since the
     function itself runs without it */
  __pyx_L1_error:;
  {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
  }
  __pyx_r = NULL;
  __pyx_L0:;
  return __pyx_r;
}

/* "View.MemoryView":1235
 *   cdef int _err_extents(int i, Py_ssize_t extent1,
 *                         Py_ssize_t extent2) except -1 with gil:
 * Raise ValueError("got differing extents in dimension %d (got %d and %d)"
 *                  % (i, extent1, extent2)); always returns -1. */
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  #ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
  #endif
  __Pyx_RefNannySetupContext("_err_extents", 0);

  /* 1238: build the (i, extent1, extent2) argument tuple */
  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1238, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1238, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1238, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1238, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_3);
  PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
  __pyx_t_1 = 0;
  __pyx_t_2 = 0;
  __pyx_t_3 = 0;

  /* 1237: %-format the message and raise ValueError */
  __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1237, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1237, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_GIVEREF(__pyx_t_3);
  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
  __pyx_t_3 = 0;
  __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1237, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_Raise(__pyx_t_3, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __PYX_ERR(1, 1237, __pyx_L1_error)

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_RefNannyFinishContext();
  #ifdef WITH_THREAD
  PyGILState_Release(__pyx_gilstate_save);
  #endif
  return __pyx_r;
}

/* "View.MemoryView":1241
 *   cdef int _err_dim(object error, char *msg, int dim) except -1 with gil:
 * Raise `error` with msg.decode('ascii') % dim; always returns -1. */
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  #ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
  #endif
  __Pyx_RefNannySetupContext("_err_dim", 0);
  __Pyx_INCREF(__pyx_v_error);

  /* 1242: raise error(msg.decode('ascii') % dim) */
__pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1242, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1242, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1242, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* call `error` with the formatted message (generated call fast paths) */
  __Pyx_INCREF(__pyx_v_error);
  __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL;
  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
    if (likely(__pyx_t_2)) {
      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(function);
      __Pyx_DECREF_SET(__pyx_t_3, function);
    }
  }
  if (!__pyx_t_2) {
    __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1242, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_GOTREF(__pyx_t_1);
  } else {
    #if CYTHON_FAST_PYCALL
    if (PyFunction_Check(__pyx_t_3)) {
      PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4};
      __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1242, __pyx_L1_error)
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    } else
    #endif
    #if CYTHON_FAST_PYCCALL
    if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) {
      PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4};
      __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1242, __pyx_L1_error)
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    } else
    #endif
    {
      __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1242, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __Pyx_GIVEREF(__pyx_t_2);
      PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL;
      __Pyx_GIVEREF(__pyx_t_4);
      PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1242, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    }
  }
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_Raise(__pyx_t_1, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __PYX_ERR(1, 1242, __pyx_L1_error)

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_XDECREF(__pyx_v_error);
  __Pyx_RefNannyFinishContext();
  #ifdef WITH_THREAD
  PyGILState_Release(__pyx_gilstate_save);
  #endif
  return __pyx_r;
}

/* "View.MemoryView":1245
 *   cdef int _err(object error, char *msg) except -1 with gil:
 * Raise `error` with msg.decode('ascii') when msg != NULL, else raise the
 * `error` object itself; always returns -1. */
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  #ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
  #endif
  __Pyx_RefNannySetupContext("_err", 0);
  __Pyx_INCREF(__pyx_v_error);

  /* 1246: if msg != NULL: */
  __pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
  if (__pyx_t_1) {
    /* 1247: raise error(msg.decode('ascii'))
"View.MemoryView":1247: raise error(msg.decode('ascii')) */
    __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1247, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    /* call `error` with the decoded message (generated call fast paths) */
    __Pyx_INCREF(__pyx_v_error);
    __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL;
    if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
      if (likely(__pyx_t_5)) {
        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
        __Pyx_INCREF(__pyx_t_5);
        __Pyx_INCREF(function);
        __Pyx_DECREF_SET(__pyx_t_4, function);
      }
    }
    if (!__pyx_t_5) {
      __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1247, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_GOTREF(__pyx_t_2);
    } else {
      #if CYTHON_FAST_PYCALL
      if (PyFunction_Check(__pyx_t_4)) {
        PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_3};
        __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1247, __pyx_L1_error)
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        __Pyx_GOTREF(__pyx_t_2);
        __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      } else
      #endif
      #if CYTHON_FAST_PYCCALL
      if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) {
        PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_3};
        __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1247, __pyx_L1_error)
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        __Pyx_GOTREF(__pyx_t_2);
        __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      } else
      #endif
      {
        __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 1247, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_6);
        __Pyx_GIVEREF(__pyx_t_5);
        PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL;
        __Pyx_GIVEREF(__pyx_t_3);
        PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_3); __pyx_t_3 = 0;
        __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1247, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
        __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
      }
    }
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(1, 1247, __pyx_L1_error)
  }

  /* 1249: else: raise error (re-raise the exception object itself) */
  /*else*/ {
    __Pyx_Raise(__pyx_v_error, 0, 0, 0);
    __PYX_ERR(1, 1249, __pyx_L1_error)
  }

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_XDECREF(__pyx_v_error);
  __Pyx_RefNannyFinishContext();
  #ifdef WITH_THREAD
  PyGILState_Release(__pyx_gilstate_save);
  #endif
  return __pyx_r;
}

/* "View.MemoryView":1252
 *   cdef int memoryview_copy_contents(__Pyx_memviewslice src,
 *                                     __Pyx_memviewslice dst,
 *                                     int src_ndim, int dst_ndim,
 *                                     int dtype_is_object)
 * (body continues past this chunk) */
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
  void *__pyx_v_tmpdata;
  size_t __pyx_v_itemsize;
  int __pyx_v_i;
  char __pyx_v_order;
  int __pyx_v_broadcasting;
  int __pyx_v_direct_copy;
  __Pyx_memviewslice __pyx_v_tmp;
  int __pyx_v_ndim;
  int __pyx_r;
  Py_ssize_t __pyx_t_1;
  int
__pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; void *__pyx_t_6; int __pyx_t_7; /* "View.MemoryView":1260 * Check for overlapping memory and verify the shapes. * """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1261 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1263 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1264 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1265 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1268 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1269 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1268 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, 
src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1270 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1271 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1270 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1273 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1275 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_5; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1276 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); if (__pyx_t_2) { /* "View.MemoryView":1277 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1278 * 
if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1279 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1277 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1281 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_4 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 1281, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1276 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1283 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1284 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_4 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 1284, __pyx_L1_error) /* "View.MemoryView":1283 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1286 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, 
itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1288 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1289 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1288 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1291 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_6 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_6 == NULL)) __PYX_ERR(1, 1291, __pyx_L1_error) __pyx_v_tmpdata = __pyx_t_6; /* "View.MemoryView":1292 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1286 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1294 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1297 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ 
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1298 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1297 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1299 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1300 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1299 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1302 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1304 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* 
"View.MemoryView":1305 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)); /* "View.MemoryView":1306 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1307 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1308 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1302 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1294 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1310 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_7 = (__pyx_t_2 != 0); if (__pyx_t_7) { /* "View.MemoryView":1313 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == 0)) __PYX_ERR(1, 1313, __pyx_L1_error) /* "View.MemoryView":1314 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, 
dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == 0)) __PYX_ERR(1, 1314, __pyx_L1_error) /* "View.MemoryView":1310 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1316 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1317 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1318 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1320 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1321 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1252 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD 
PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1324 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; /* "View.MemoryView":1328 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1330 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1L; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1331 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1332 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1333 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* 
"View.MemoryView":1335 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":1336 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1337 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1338 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1324 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1346 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1350 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1351 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, 
__pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1350 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1346 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1355 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1358 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1355 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1361 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void 
__pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; int __pyx_t_3; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1365 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":1366 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_3 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_3) { /* "View.MemoryView":1367 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_3 = (__pyx_v_inc != 0); if (__pyx_t_3) { /* "View.MemoryView":1368 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1367 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1370 * Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1366 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1372 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1373 * else: * refcount_objects_in_slice(data, shape + 
1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1375 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1361 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1381 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1384 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1385 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1387 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ 
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1381 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1391 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; /* "View.MemoryView":1395 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1396 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1398 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1399 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1400 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize); /* 
"View.MemoryView":1401 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1398 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1403 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1404 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1406 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1391 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } static struct __pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = 
((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_array___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static PyMethodDef __pyx_methods_array[] = { {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { 0, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, 
/*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { 0, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "bmtools.exact.moments.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static 
PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "bmtools.exact.moments.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ 
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryview___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } 
Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject 
*o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", 
__pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "bmtools.exact.moments.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, 
/*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryviewslice___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; 
__pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "bmtools.exact.moments._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ 
__pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { #if PY_VERSION_HEX < 0x03020000 { PyObject_HEAD_INIT(NULL) NULL, 0, NULL }, #else PyModuleDef_HEAD_INIT, #endif "moments", __pyx_k_Boltzmann_machine_moment_calcula, /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 
1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_kp_s_Number_of_threads_must_be_0, __pyx_k_Number_of_threads_must_be_0, sizeof(__pyx_k_Number_of_threads_must_be_0), 0, 0, 1, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_all_in_place, __pyx_k_all_in_place, sizeof(__pyx_k_all_in_place), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_biases, __pyx_k_biases, sizeof(__pyx_k_biases), 0, 0, 1, 1}, {&__pyx_n_s_bmtools_exact_moments, __pyx_k_bmtools_exact_moments, sizeof(__pyx_k_bmtools_exact_moments), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_calculate_moments_parallel, __pyx_k_calculate_moments_parallel, sizeof(__pyx_k_calculate_moments_parallel), 0, 0, 1, 1}, {&__pyx_n_s_calculate_moments_sequential, __pyx_k_calculate_moments_sequential, sizeof(__pyx_k_calculate_moments_sequential), 0, 0, 1, 1}, {&__pyx_n_s_calculate_probs_parallel, __pyx_k_calculate_probs_parallel, sizeof(__pyx_k_calculate_probs_parallel), 0, 0, 1, 1}, {&__pyx_n_s_class, 
__pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_d, __pyx_k_d, sizeof(__pyx_k_d), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_first_mom, __pyx_k_first_mom, sizeof(__pyx_k_first_mom), 0, 0, 1, 1}, {&__pyx_n_s_first_moms, __pyx_k_first_moms, sizeof(__pyx_k_first_moms), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_force, __pyx_k_force, sizeof(__pyx_k_force), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_kp_s_home_matt_Projects_boltzmann_ma, __pyx_k_home_matt_Projects_boltzmann_ma, sizeof(__pyx_k_home_matt_Projects_boltzmann_ma), 0, 0, 1, 0}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_intervals, __pyx_k_intervals, sizeof(__pyx_k_intervals), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 
0}, {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_norm_const, __pyx_k_norm_const, sizeof(__pyx_k_norm_const), 0, 0, 1, 1}, {&__pyx_n_s_norm_consts, __pyx_k_norm_consts, sizeof(__pyx_k_norm_consts), 0, 0, 1, 1}, {&__pyx_n_s_num_states, __pyx_k_num_states, sizeof(__pyx_k_num_states), 0, 0, 1, 1}, {&__pyx_n_s_num_threads, __pyx_k_num_threads, sizeof(__pyx_k_num_threads), 0, 0, 1, 1}, {&__pyx_n_s_num_units, __pyx_k_num_units, sizeof(__pyx_k_num_units), 0, 0, 1, 1}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_prob, __pyx_k_prob, sizeof(__pyx_k_prob), 0, 0, 1, 1}, {&__pyx_n_s_probs, __pyx_k_probs, sizeof(__pyx_k_probs), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_second_mom, __pyx_k_second_mom, sizeof(__pyx_k_second_mom), 0, 0, 1, 1}, {&__pyx_n_s_second_moms, __pyx_k_second_moms, sizeof(__pyx_k_second_moms), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_state, __pyx_k_state, sizeof(__pyx_k_state), 0, 0, 1, 1}, {&__pyx_n_s_state_index, __pyx_k_state_index, sizeof(__pyx_k_state_index), 0, 0, 1, 1}, {&__pyx_n_s_states, __pyx_k_states, 
sizeof(__pyx_k_states), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_t, __pyx_k_t, sizeof(__pyx_k_t), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_weights, __pyx_k_weights, sizeof(__pyx_k_weights), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 133, __pyx_L1_error) __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 167, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 146, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 149, __pyx_L1_error) __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 396, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if 
(!__pyx_builtin_TypeError) __PYX_ERR(1, 425, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 599, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 818, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "bmtools/exact/moments.pyx":133 * cdef long num_states = 2**num_units * if num_threads <= 0: * raise ValueError('Number of threads must be > 0') # <<<<<<<<<<<<<< * check_state_space_size(num_units, force) * cdef state_t[:, :] states = array( */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_Number_of_threads_must_be_0); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(0, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "bmtools/exact/moments.pyx":219 * cdef long num_states = 2 ** num_units * if num_threads <= 0: * raise ValueError('Number of threads must be > 0') # <<<<<<<<<<<<<< * check_state_space_size(num_units, force) * cdef state_t[:, :] states = array( */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_Number_of_threads_must_be_0); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(0, 219, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "View.MemoryView":131 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 131, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "View.MemoryView":134 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__7)) 
__PYX_ERR(1, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "View.MemoryView":137 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_n_s_ASCII); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":146 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 146, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":174 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 174, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":190 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "View.MemoryView":484 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__12)) 
__PYX_ERR(1, 484, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_GIVEREF(__pyx_tuple__12); /* "View.MemoryView":556 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 556, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "View.MemoryView":563 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__14 = PyTuple_New(1); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 563, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__14, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__14); /* "View.MemoryView":668 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__15 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__15)) __PYX_ERR(1, 668, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); /* "View.MemoryView":671 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ __pyx_slice__16 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__16)) __PYX_ERR(1, 671, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__16); __Pyx_GIVEREF(__pyx_slice__16); /* "View.MemoryView":682 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_slice__17 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__17)) __PYX_ERR(1, 682, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_slice__17); __Pyx_GIVEREF(__pyx_slice__17); /* "View.MemoryView":689 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "bmtools/exact/moments.pyx":24 * double log(double x) nogil * * def calculate_moments_sequential(double[:, :] weights, double[:] biases, # <<<<<<<<<<<<<< * bint force=False): * """Calculate Boltzmann machine distribution moments. */ __pyx_tuple__19 = PyTuple_Pack(13, __pyx_n_s_weights, __pyx_n_s_biases, __pyx_n_s_force, __pyx_n_s_state_index, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_num_units, __pyx_n_s_prob, __pyx_n_s_norm_const, __pyx_n_s_num_states, __pyx_n_s_state, __pyx_n_s_first_mom, __pyx_n_s_second_mom); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); __pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(3, 0, 13, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_matt_Projects_boltzmann_ma, __pyx_n_s_calculate_moments_sequential, 24, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) __PYX_ERR(0, 24, __pyx_L1_error) /* "bmtools/exact/moments.pyx":74 * * * def calculate_moments_parallel(double[:, :] weights, double[:] biases, # <<<<<<<<<<<<<< * bint force=False, int num_threads=2, * double[:] norm_consts=None, */ __pyx_tuple__21 = PyTuple_Pack(14, __pyx_n_s_weights, __pyx_n_s_biases, __pyx_n_s_force, __pyx_n_s_num_threads, __pyx_n_s_norm_consts, __pyx_n_s_first_moms, __pyx_n_s_second_moms, __pyx_n_s_t, __pyx_n_s_num_units, __pyx_n_s_prob, __pyx_n_s_num_states, __pyx_n_s_states, __pyx_n_s_all_in_place, __pyx_n_s_intervals); if (unlikely(!__pyx_tuple__21)) 
__PYX_ERR(0, 74, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); __pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(7, 0, 14, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_matt_Projects_boltzmann_ma, __pyx_n_s_calculate_moments_parallel, 74, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) __PYX_ERR(0, 74, __pyx_L1_error) /* "bmtools/exact/moments.pyx":184 * * * def calculate_probs_parallel( # <<<<<<<<<<<<<< * double[:, :] weights, double[:] biases, bint force=False, * int num_threads=2,): */ __pyx_tuple__23 = PyTuple_Pack(11, __pyx_n_s_weights, __pyx_n_s_biases, __pyx_n_s_force, __pyx_n_s_num_threads, __pyx_n_s_t, __pyx_n_s_num_units, __pyx_n_s_num_states, __pyx_n_s_states, __pyx_n_s_norm_consts, __pyx_n_s_probs, __pyx_n_s_intervals); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); __pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(4, 0, 11, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_matt_Projects_boltzmann_ma, __pyx_n_s_calculate_probs_parallel, 184, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) __PYX_ERR(0, 184, __pyx_L1_error) /* "View.MemoryView":282 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 282, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); /* "View.MemoryView":283 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ 
__pyx_tuple__26 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 283, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__26); __Pyx_GIVEREF(__pyx_tuple__26); /* "View.MemoryView":284 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__27 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(1, 284, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__27); __Pyx_GIVEREF(__pyx_tuple__27); /* "View.MemoryView":287 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__28 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__28); __Pyx_GIVEREF(__pyx_tuple__28); /* "View.MemoryView":288 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__29 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__29); __Pyx_GIVEREF(__pyx_tuple__29); __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; 
__pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initmoments(void); /*proto*/ PyMODINIT_FUNC initmoments(void) #else PyMODINIT_FUNC PyInit_moments(void); /*proto*/ PyMODINIT_FUNC PyInit_moments(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; static PyThread_type_lock __pyx_t_7[8]; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_moments(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef 
WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("moments", __pyx_methods, __pyx_k_Boltzmann_machine_moment_calcula, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. ---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_bmtools__exact__moments) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "bmtools.exact.moments")) { if (unlikely(PyDict_SetItemString(modules, "bmtools.exact.moments", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; 
Py_INCREF(Py_None); /*--- Variable export code ---*/ /*--- Function export code ---*/ if (__Pyx_ExportFunction("calc_unnormed_probs_for_state_range", (void (*)(void))__pyx_f_7bmtools_5exact_7moments_calc_unnormed_probs_for_state_range, "void (__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, double *, __Pyx_memviewslice, long, long)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ExportFunction("normalise_probabilities", (void (*)(void))__pyx_f_7bmtools_5exact_7moments_normalise_probabilities, "void (__Pyx_memviewslice, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ExportFunction("accum_moments_for_state_range", (void (*)(void))__pyx_f_7bmtools_5exact_7moments_accum_moments_for_state_range, "void (__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, double *, __Pyx_memviewslice, __Pyx_memviewslice, long, long)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ExportFunction("calc_norm_const", (void (*)(void))__pyx_f_7bmtools_5exact_7moments_calc_norm_const, "double (__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, struct __pyx_opt_args_7bmtools_5exact_7moments_calc_norm_const *__pyx_optional_args)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ExportFunction("normalise_first_moment", (void (*)(void))__pyx_f_7bmtools_5exact_7moments_normalise_first_moment, "void (__Pyx_memviewslice, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ExportFunction("combine_and_normalise_first_moments", (void (*)(void))__pyx_f_7bmtools_5exact_7moments_combine_and_normalise_first_moments, "void (__Pyx_memviewslice, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ExportFunction("normalise_and_reflect_second_moment", (void (*)(void))__pyx_f_7bmtools_5exact_7moments_normalise_and_reflect_second_moment, "void (__Pyx_memviewslice, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ExportFunction("combine_normalise_and_reflect_second_moments", (void 
(*)(void))__pyx_f_7bmtools_5exact_7moments_combine_normalise_and_reflect_second_moments, "void (__Pyx_memviewslice, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Type init code ---*/ __pyx_vtabptr_array = &__pyx_vtable_array; __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 103, __pyx_L1_error) __pyx_type___pyx_array.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 103, __pyx_L1_error) __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 275, __pyx_L1_error) __pyx_type___pyx_MemviewEnum.tp_print = 0; __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 326, __pyx_L1_error) 
__pyx_type___pyx_memoryview.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 326, __pyx_L1_error) __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 951, __pyx_L1_error) __pyx_type___pyx_memoryviewslice.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 951, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; /*--- Type import code ---*/ /*--- Variable import code ---*/ __pyx_t_1 = __Pyx_ImportModule("bmtools.exact.helpers"); if (!__pyx_t_1) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportVoidPtr(__pyx_t_1, "state_t_code", (void **)&__pyx_vp_7bmtools_5exact_7helpers_state_t_code, "char *") < 0) __PYX_ERR(0, 1, __pyx_L1_error) Py_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*--- Function import code ---*/ __pyx_t_2 = __Pyx_ImportModule("bmtools.exact.helpers"); if (!__pyx_t_2) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_2, "neg_energy", (void (**)(void))&__pyx_f_7bmtools_5exact_7helpers_neg_energy, "double (__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_2, "check_state_space_size", (void (**)(void))&__pyx_f_7bmtools_5exact_7helpers_check_state_space_size, "void (int, int, int __pyx_skip_dispatch)") 
< 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_2, "partition_state_space", (void (**)(void))&__pyx_f_7bmtools_5exact_7helpers_partition_state_space, "__Pyx_memviewslice (long, int)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_2, "index_to_state", (void (**)(void))&__pyx_f_7bmtools_5exact_7helpers_index_to_state, "void (long, __Pyx_memviewslice, int __pyx_skip_dispatch)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_2, "next_state", (void (**)(void))&__pyx_f_7bmtools_5exact_7helpers_next_state, "void (__Pyx_memviewslice, long)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) Py_DECREF(__pyx_t_2); __pyx_t_2 = 0; /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "bmtools/exact/moments.pyx":24 * double log(double x) nogil * * def calculate_moments_sequential(double[:, :] weights, double[:] biases, # <<<<<<<<<<<<<< * bint force=False): * """Calculate Boltzmann machine distribution moments. 
*/ __pyx_t_3 = PyCFunction_NewEx(&__pyx_mdef_7bmtools_5exact_7moments_1calculate_moments_sequential, NULL, __pyx_n_s_bmtools_exact_moments); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_d, __pyx_n_s_calculate_moments_sequential, __pyx_t_3) < 0) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "bmtools/exact/moments.pyx":76 * def calculate_moments_parallel(double[:, :] weights, double[:] biases, * bint force=False, int num_threads=2, * double[:] norm_consts=None, # <<<<<<<<<<<<<< * double[:, :] first_moms=None, * double[:, :, :] second_moms=None): */ __pyx_t_4 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(Py_None); if (unlikely(!__pyx_t_4.memview)) __PYX_ERR(0, 76, __pyx_L1_error) __pyx_k_ = __pyx_t_4; __pyx_t_4.memview = NULL; __pyx_t_4.data = NULL; /* "bmtools/exact/moments.pyx":77 * bint force=False, int num_threads=2, * double[:] norm_consts=None, * double[:, :] first_moms=None, # <<<<<<<<<<<<<< * double[:, :, :] second_moms=None): * """Calculate Boltzmann machine distribution moments. */ __pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(Py_None); if (unlikely(!__pyx_t_5.memview)) __PYX_ERR(0, 77, __pyx_L1_error) __pyx_k__2 = __pyx_t_5; __pyx_t_5.memview = NULL; __pyx_t_5.data = NULL; /* "bmtools/exact/moments.pyx":78 * double[:] norm_consts=None, * double[:, :] first_moms=None, * double[:, :, :] second_moms=None): # <<<<<<<<<<<<<< * """Calculate Boltzmann machine distribution moments. 
* */ __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(Py_None); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 78, __pyx_L1_error) __pyx_k__3 = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "bmtools/exact/moments.pyx":74 * * * def calculate_moments_parallel(double[:, :] weights, double[:] biases, # <<<<<<<<<<<<<< * bint force=False, int num_threads=2, * double[:] norm_consts=None, */ __pyx_t_3 = PyCFunction_NewEx(&__pyx_mdef_7bmtools_5exact_7moments_3calculate_moments_parallel, NULL, __pyx_n_s_bmtools_exact_moments); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 74, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_d, __pyx_n_s_calculate_moments_parallel, __pyx_t_3) < 0) __PYX_ERR(0, 74, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "bmtools/exact/moments.pyx":184 * * * def calculate_probs_parallel( # <<<<<<<<<<<<<< * double[:, :] weights, double[:] biases, bint force=False, * int num_threads=2,): */ __pyx_t_3 = PyCFunction_NewEx(&__pyx_mdef_7bmtools_5exact_7moments_5calculate_probs_parallel, NULL, __pyx_n_s_bmtools_exact_moments); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_d, __pyx_n_s_calculate_probs_parallel, __pyx_t_3) < 0) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "bmtools/exact/moments.pyx":1 * # -*- coding: utf-8 -*- # <<<<<<<<<<<<<< * """Boltzmann machine moment calculation. 
* */ __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_3) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":207 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_3 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 207, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_3) < 0) __PYX_ERR(1, 207, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":282 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 282, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":283 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__26, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 283, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":284 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = 
Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__27, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 284, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":287 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__28, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":288 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__29, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":312 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":313 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_7[0] = PyThread_allocate_lock(); __pyx_t_7[1] = PyThread_allocate_lock(); __pyx_t_7[2] = PyThread_allocate_lock(); __pyx_t_7[3] = PyThread_allocate_lock(); 
__pyx_t_7[4] = PyThread_allocate_lock(); __pyx_t_7[5] = PyThread_allocate_lock(); __pyx_t_7[6] = PyThread_allocate_lock(); __pyx_t_7[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_7, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":535 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 535, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_3) < 0) __PYX_ERR(1, 535, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":981 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 981, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_3) < 0) __PYX_ERR(1, 981, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "View.MemoryView":1391 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __PYX_XDEC_MEMVIEW(&__pyx_t_4, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_5, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init bmtools.exact.moments", 
__pyx_clineno, __pyx_lineno, __pyx_filename); } Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init bmtools.exact.moments"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 
0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* BufferFormatCheck */ static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; 
ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? 
"'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static 
CYTHON_INLINE PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian 
compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': 
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (!buf) { PyErr_SetString(PyExc_ValueError, "buf is NULL."); goto fail; } else if (memviewslice->memview || memviewslice->data) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } static CYTHON_INLINE void __pyx_fatalerror(const char *fmt, ...) 
{ va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); Py_FatalError(msg); va_end(vargs); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview || (PyObject *) memview == Py_None) return; if (__pyx_get_slice_count(memview) < 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (first_time) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview ) { return; } else if ((PyObject *) memview == Py_None) { memslice->memview = NULL; return; } if (__pyx_get_slice_count(memview) <= 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (last_time) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { 
memslice->memview = NULL; } } /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void 
__Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } #if PY_VERSION_HEX >= 0x03030000 if (cause) { #else if (cause && cause != Py_None) { #endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto 
bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* ArgTypeTest */ static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); } static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (likely(Py_TYPE(obj) == type)) return 1; #if PY_MAJOR_VERSION == 2 else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(PyObject_TypeCheck(obj, type))) return 1; } __Pyx_RaiseArgumentTypeInvalid(name, obj, type); return 0; } /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result = memcmp(ps1, ps2, 
(size_t)length); return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * 
kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_COMPILING_IN_CPYTHON #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } length = stop - start; if (unlikely(length <= 0)) return PyUnicode_FromUnicode(NULL, 0); cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", 
expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? "" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; return PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #else static int 
__Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) { #endif PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, 
&tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_VERSION_HEX < 0x03030000 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_VERSION_HEX < 0x03030000 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL #include "frameobject.h" static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = PyThreadState_GET(); PyObject **fastlocals; Py_ssize_t i; 
PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = f->f_localsplus; for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif // CPython < 3.6 #endif // CYTHON_FAST_PYCALL /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); PyObject 
*result; int flags; assert(PyCFunction_Check(func)); assert(METH_FASTCALL == PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST)); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); return (*((__Pyx_PyCFunctionFast)meth)) (self, args, nargs, NULL); } #endif // CYTHON_FAST_PYCCALL /* GetItemInt */ static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o); if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || 
PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) { #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned 
long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* WriteUnraisableException */ static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback, CYTHON_UNUSED int nogil) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_PyThreadState_declare #ifdef WITH_THREAD PyGILState_STATE state; if (nogil) state = PyGILState_Ensure(); #ifdef _MSC_VER else state = (PyGILState_STATE)-1; #endif #endif __Pyx_PyThreadState_assign __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } #ifdef WITH_THREAD if (nogil) PyGILState_Release(state); #endif } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if 
(unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject 
*__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* 
__Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
c_line : py_line, py_code); } py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (PyObject_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (PyObject_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } Py_DECREF(obj); view->obj = NULL; } #endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; start = 0; } else { step = -1; start = ndim - 1; } for (i = 0; i < ndim; i++) { index = start + step * i; if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) return 0; itemsize *= mvs.shape[index]; } return 1; } /* OverlappingSlices */ static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) { char *start, *end; int i; start = end = slice->data; for (i = 0; i < ndim; i++) { Py_ssize_t stride = slice->strides[i]; Py_ssize_t extent = slice->shape[i]; if (extent == 0) { *out_start = *out_end = start; return; } else { if (stride > 0) end += stride * (extent - 1); else start += stride * (extent - 1); } } *out_start = start; *out_end = end + 
itemsize; } static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) { void *start1, *end1, *start2, *end2; __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); return (start1 < end2) && (start2 < end1); } /* Capsule */ static CYTHON_INLINE PyObject * __pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (buf->strides[dim] != sizeof(void *)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (buf->strides[dim] != buf->itemsize) { 
PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (stride < buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (spec & (__Pyx_MEMVIEW_PTR)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (buf->suboffsets) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (buf->suboffsets && buf->suboffsets[dim] >= 0) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (!buf->suboffsets || (buf->suboffsets && buf->suboffsets[dim] < 0)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } 
stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (buf->ndim != ndim) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned) buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (!__pyx_check_strides(buf, i, ndim, spec)) goto fail; if (!__pyx_check_suboffsets(buf, i, ndim, spec)) goto fail; } if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS, 2, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS, 1, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ 
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS, 3, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* None */ static CYTHON_INLINE long __Pyx_pow_long(long b, long e) { long t = b; switch (e) { case 3: t *= b; case 2: t *= b; case 1: return t; case 0: return 1; } #if 1 if (unlikely(e<0)) return 0; #endif t = 1; while (likely(e)) { t *= (b * (e&1)) | ((~e)&1); b *= b; e >>= 1; } return t; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if 
(sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* MemviewDtypeToObject */ static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp) { return (PyObject *) PyFloat_FromDouble(*(double *) itemp); } static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj) { double value = __pyx_PyFloat_AsDouble(obj); if ((value == (double)-1) && PyErr_Occurred()) return 0; *(double *) itemp = value; return 1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* None */ static 
CYTHON_INLINE Py_ssize_t __Pyx_pow_Py_ssize_t(Py_ssize_t b, Py_ssize_t e) { Py_ssize_t t = b; switch (e) { case 3: t *= b; case 2: t *= b; case 1: return t; case 0: return 1; } #if 1 if (unlikely(e<0)) return 0; #endif t = 1; while (likely(e)) { t *= (b * (e&1)) | ((~e)&1); b *= b; e >>= 1; } return t; } /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (from_mvs->suboffsets[i] >= 0) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: 
__Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned 
long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | 
(unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, 
PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 
* sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { 
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) 
(((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* 
CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) -1, const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) 
| (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) 
(((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) 
PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_nn___pyx_t_7bmtools_5exact_7helpers_state_t(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS, 1, &__Pyx_TypeInfo_nn___pyx_t_7bmtools_5exact_7helpers_state_t, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_nn___pyx_t_7bmtools_5exact_7helpers_state_t(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { 
(__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS, 2, &__Pyx_TypeInfo_nn___pyx_t_7bmtools_5exact_7helpers_state_t, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* FunctionExport */ static int __Pyx_ExportFunction(const char *name, void (*f)(void), const char *sig) { PyObject *d = 0; PyObject *cobj = 0; union { void (*fp)(void); void *p; } tmp; d = PyObject_GetAttrString(__pyx_m, (char *)"__pyx_capi__"); if (!d) { PyErr_Clear(); d = PyDict_New(); if (!d) goto bad; Py_INCREF(d); if (PyModule_AddObject(__pyx_m, (char *)"__pyx_capi__", d) < 0) goto bad; } tmp.fp = f; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(tmp.p, sig, 0); #else cobj = PyCObject_FromVoidPtrAndDesc(tmp.p, (void *)sig, 0); #endif if (!cobj) goto bad; if (PyDict_SetItemString(d, name, cobj) < 0) goto bad; Py_DECREF(cobj); Py_DECREF(d); return 0; bad: Py_XDECREF(cobj); Py_XDECREF(d); return -1; } /* ModuleImport */ #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if 
(!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif /* VoidPtrImport */ #ifndef __PYX_HAVE_RT_ImportVoidPtr #define __PYX_HAVE_RT_ImportVoidPtr static int __Pyx_ImportVoidPtr(PyObject *module, const char *name, void **p, const char *sig) { PyObject *d = 0; PyObject *cobj = 0; d = PyObject_GetAttrString(module, (char *)"__pyx_capi__"); if (!d) goto bad; cobj = PyDict_GetItemString(d, name); if (!cobj) { PyErr_Format(PyExc_ImportError, "%.200s does not export expected C variable %.200s", PyModule_GetName(module), name); goto bad; } #if PY_VERSION_HEX >= 0x02070000 if (!PyCapsule_IsValid(cobj, sig)) { PyErr_Format(PyExc_TypeError, "C variable %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", PyModule_GetName(module), name, sig, PyCapsule_GetName(cobj)); goto bad; } *p = PyCapsule_GetPointer(cobj, sig); #else {const char *desc, *s1, *s2; desc = (const char *)PyCObject_GetDesc(cobj); if (!desc) goto bad; s1 = desc; s2 = sig; while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; } if (*s1 != *s2) { PyErr_Format(PyExc_TypeError, "C variable %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", PyModule_GetName(module), name, sig, desc); goto bad; } *p = PyCObject_AsVoidPtr(cobj);} #endif if (!(*p)) goto bad; Py_DECREF(d); return 0; bad: Py_XDECREF(d); return -1; } #endif /* FunctionImport */ #ifndef __PYX_HAVE_RT_ImportFunction #define __PYX_HAVE_RT_ImportFunction static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig) { PyObject *d = 0; PyObject *cobj = 0; union { void (*fp)(void); void *p; } tmp; d = PyObject_GetAttrString(module, (char *)"__pyx_capi__"); if (!d) goto bad; cobj = PyDict_GetItemString(d, funcname); if (!cobj) { PyErr_Format(PyExc_ImportError, "%.200s does not export expected C function %.200s", PyModule_GetName(module), funcname); goto bad; } #if PY_VERSION_HEX >= 0x02070000 if 
(!PyCapsule_IsValid(cobj, sig)) { PyErr_Format(PyExc_TypeError, "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj)); goto bad; } tmp.p = PyCapsule_GetPointer(cobj, sig); #else {const char *desc, *s1, *s2; desc = (const char *)PyCObject_GetDesc(cobj); if (!desc) goto bad; s1 = desc; s2 = sig; while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; } if (*s1 != *s2) { PyErr_Format(PyExc_TypeError, "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", PyModule_GetName(module), funcname, sig, desc); goto bad; } tmp.p = PyCObject_AsVoidPtr(cobj);} #endif *f = tmp.fp; if (!(*f)) goto bad; Py_DECREF(d); return 0; bad: Py_XDECREF(d); return -1; } #endif /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if ( #if PY_MAJOR_VERSION < 3 && 
__PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { #if PY_VERSION_HEX < 0x03030000 char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; #else if (__Pyx_PyUnicode_READY(o) == -1) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (PyUnicode_IS_ASCII(o)) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif #endif } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif 
#else res = PyNumber_Int(x); #endif if (res) { #if PY_MAJOR_VERSION < 3 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(x); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << 
PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
t0dem.c
#include <stdio.h> #include "gdal.h" #include <omp.h> /* mod11A1 MODLAND_QC bits [00-11] [00] = class 0 ; LST produced, good quality, not necessary to examine detailed QA [01] = class 1 ; LST produced, unreliable or unquantifiable quality, recommend examination of more detailed QA [10] = class 2 : LST not produced due to cloud effects [11] = class 3 : LST not produced primarily due to reasons other than clouds */ int mod11A1a(int pixel) { return (pixel & 0x03); } void usage() { printf( "-----------------------------------------\n"); printf( "--Modis Processing chain--Serial code----\n"); printf( "-----------------------------------------\n"); printf( "./t0dem inDEM inLST inLST_QA\n"); printf( "\toutT0DEM\n"); printf( "-----------------------------------------\n"); printf( "inDEM\t\tDigital Elevation Model 250m [m]\n"); printf( "inLST\t\tModis LST day 1Km\n"); printf( "inLST_QA\t\tModis LST day 1Km Quality Assessment\n"); printf( "outT0DEM\tAltitude corrected Temperature output [K]\n"); return; } int main( int argc, char *argv[] ) { if( argc < 4 ) { usage(); return 1; } char *inB1 = argv[1]; //DEM char *inB2 = argv[2]; //LST char *inB3 = argv[3]; //LST_QA char *t0demF = argv[4]; GDALAllRegister(); GDALDatasetH hD1 = GDALOpen(inB1,GA_ReadOnly);//DEM GDALDatasetH hD2 = GDALOpen(inB2,GA_ReadOnly);//LST GDALDatasetH hD3 = GDALOpen(inB3,GA_ReadOnly);//LST_QA if(hD1==NULL||hD2==NULL||hD3==NULL){ printf("One or more input files "); printf("could not be loaded\n"); exit(1); } GDALDriverH hDr1 = GDALGetDatasetDriver(hD1); char **options = NULL; options = CSLSetNameValue( options, "TILED", "YES" ); options = CSLSetNameValue( options, "COMPRESS", "DEFLATE" ); options = CSLSetNameValue( options, "PREDICTOR", "2" ); GDALDatasetH hDOut = GDALCreateCopy(hDr1,t0demF,hD1,FALSE,options,NULL,NULL); GDALRasterBandH hBOut = GDALGetRasterBand(hDOut,1); GDALRasterBandH hB1 = GDALGetRasterBand(hD1,1);//DEM GDALRasterBandH hB2 = GDALGetRasterBand(hD2,1);//LST GDALRasterBandH hB3 = 
GDALGetRasterBand(hD3,1);//LST_QA int nX = GDALGetRasterBandXSize(hB1); int nY = GDALGetRasterBandYSize(hB1); int N=nX*nY; float * l1 = (float *) malloc(sizeof(float)*N); float * l2 = (float *) malloc(sizeof(float)*N); int * l3 = (int *) malloc(sizeof(int)*N); float * lOut = (float *) malloc(sizeof(float)*N); int rowcol,qa; //DEM 250m GDALRasterIO(hB1,GF_Read,0,0,nX,nY,l1,nX,nY,GDT_Float32,0,0); //LST 1Km GDALRasterIO(hB2,GF_Read,0,0,nX,nY,l2,nX,nY,GDT_Float32,0,0); //LST_QA 1Km GDALRasterIO(hB3,GF_Read,0,0,nX,nY,l3,nX,nY,GDT_Int32,0,0); #pragma omp parallel for default (none) \ private (rowcol, qa) \ shared (N, l1, l2, l3, lOut) for(rowcol=0;rowcol<N;rowcol++){ qa = mod11A1a(l3[rowcol]); if( qa == 0 || qa == 1 ) lOut[rowcol] = (l2[rowcol]*0.02)+0.00627*l1[rowcol]; else lOut[rowcol] = -28768; } #pragma omp barrier GDALRasterIO(hBOut,GF_Write,0,0,nX,nY,lOut,nX,nY,GDT_Float32,0,0); if( l1 != NULL ) free( l1 ); if( l2 != NULL ) free( l2 ); if( l3 != NULL ) free( l3 ); GDALClose(hD1); GDALClose(hD2); GDALClose(hD3); GDALClose(hDOut); return(EXIT_SUCCESS); }
GB_msort_1.c
//------------------------------------------------------------------------------
// GB_msort_1: sort a 1-by-n list of integers
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// A parallel mergesort of an array of 1-by-n integers.

#include "GB_msort_1.h"

//------------------------------------------------------------------------------
// GB_msort_1_binary_search: binary search for the pivot
//------------------------------------------------------------------------------

// The Pivot value is Y [pivot], and a binary search for the Pivot is made in
// the array X [p_start...p_end-1], which is sorted in non-decreasing order on
// input.  The return value is pleft, where
//
//    X [p_start ... pleft-1] <= Pivot and
//    X [pleft ... p_end-1] >= Pivot holds.
//
// pleft is returned in the range p_start to p_end.  If pleft is p_start, then
// the Pivot is smaller than all entries in X [p_start...p_end-1], and the left
// list X [p_start...pleft-1] is empty.  If pleft is p_end, then the Pivot is
// larger than all entries in X [p_start...p_end-1], and the right list X
// [pleft...p_end-1] is empty.
static int64_t GB_msort_1_binary_search    // return pleft
(
    const int64_t *restrict Y_0,           // Pivot is Y [pivot]
    const int64_t pivot,
    const int64_t *restrict X_0,           // search in X [p_start..p_end-1]
    const int64_t p_start,
    const int64_t p_end
)
{

    //--------------------------------------------------------------------------
    // find where the Pivot appears in X
    //--------------------------------------------------------------------------

    // binary search of X [p_start...p_end-1] for the Pivot
    int64_t pleft = p_start ;
    int64_t pright = p_end - 1 ;
    while (pleft < pright)
    {
        int64_t pmiddle = (pleft + pright) >> 1 ;
        // less = (X [pmiddle] < Pivot)
        bool less = GB_lt_1 (X_0, pmiddle, Y_0, pivot) ;
        // branch-free narrowing: keep whichever half can still hold the Pivot
        pleft  = less ? (pmiddle+1) : pleft ;
        pright = less ? pright : pmiddle ;
    }

    // binary search is narrowed down to a single item
    // or it has found the list is empty:
    ASSERT (pleft == pright || pleft == pright + 1) ;

    // If found is true then X [pleft == pright] == Pivot.  If duplicates
    // appear then X [pleft] is any one of the entries equal to the Pivot
    // in the list.  If found is false then
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft+1 ... p_end-1] > Pivot holds.
    //    The value X [pleft] may be either < or > Pivot.
    bool found = (pleft == pright) && GB_eq_1 (X_0, pleft, Y_0, pivot) ;

    // Modify pleft and pright:
    if (!found && (pleft == pright))
    {
        if (GB_lt_1 (X_0, pleft, Y_0, pivot))
        {
            pleft++ ;
        }
        else
        {
            // pright++ ;  // (not needed)
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    // If found is false then
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft ... p_end-1] > Pivot holds,
    //    and pleft-1 == pright

    // If X has no duplicates, then whether or not Pivot is found,
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft ... p_end-1] >= Pivot holds.

    // If X has duplicates, then whether or not Pivot is found,
    //    X [p_start ... pleft-1] <= Pivot and
    //    X [pleft ... p_end-1] >= Pivot holds.

    return (pleft) ;
}

//------------------------------------------------------------------------------
// GB_msort_1_create_merge_tasks
//------------------------------------------------------------------------------

// Recursively constructs ntasks tasks to merge two arrays, Left and Right,
// into Sresult, where Left is L [pL_start...pL_end-1], Right is R
// [pR_start...pR_end-1], and Sresult is S [pS_start...pS_start+total_work-1],
// and where total_work is the total size of Left and Right.
//
// Task tid will merge L [L_task [tid] ... L_task [tid] + L_len [tid] - 1] and
// R [R_task [tid] ... R_task [tid] + R_len [tid] -1] into the merged output
// array S [S_task [tid] ... ].  The task tids created are t0 to
// t0+ntasks-1.

void GB_msort_1_create_merge_tasks
(
    // output:
    int64_t *restrict L_task,        // L_task [t0...t0+ntasks-1] computed
    int64_t *restrict L_len,         // L_len [t0...t0+ntasks-1] computed
    int64_t *restrict R_task,        // R_task [t0...t0+ntasks-1] computed
    int64_t *restrict R_len,         // R_len [t0...t0+ntasks-1] computed
    int64_t *restrict S_task,        // S_task [t0...t0+ntasks-1] computed
    // input:
    const int t0,                    // first task tid to create
    const int ntasks,                // # of tasks to create
    const int64_t pS_start,          // merge into S [pS_start...]
const int64_t *restrict L_0, // Left = L [pL_start...pL_end-1] const int64_t pL_start, const int64_t pL_end, const int64_t *restrict R_0, // Right = R [pR_start...pR_end-1] const int64_t pR_start, const int64_t pR_end ) { //-------------------------------------------------------------------------- // get problem size //-------------------------------------------------------------------------- int64_t nleft = pL_end - pL_start ; // size of Left array int64_t nright = pR_end - pR_start ; // size of Right array int64_t total_work = nleft + nright ; // total work to do ASSERT (ntasks >= 1) ; ASSERT (total_work > 0) ; //-------------------------------------------------------------------------- // create the tasks //-------------------------------------------------------------------------- if (ntasks == 1) { //---------------------------------------------------------------------- // a single task will merge all of Left and Right into Sresult //---------------------------------------------------------------------- L_task [t0] = pL_start ; L_len [t0] = nleft ; R_task [t0] = pR_start ; R_len [t0] = nright ; S_task [t0] = pS_start ; } else { //---------------------------------------------------------------------- // partition the Left and Right arrays for multiple merge tasks //---------------------------------------------------------------------- int64_t pleft, pright ; if (nleft >= nright) { // split Left in half, and search for its pivot in Right pleft = (pL_end + pL_start) >> 1 ; pright = GB_msort_1_binary_search ( L_0, pleft, R_0, pR_start, pR_end) ; } else { // split Right in half, and search for its pivot in Left pright = (pR_end + pR_start) >> 1 ; pleft = GB_msort_1_binary_search ( R_0, pright, L_0, pL_start, pL_end) ; } //---------------------------------------------------------------------- // partition the tasks according to the work of each partition //---------------------------------------------------------------------- // work0 is the total work in the first 
partition int64_t work0 = (pleft - pL_start) + (pright - pR_start) ; int ntasks0 = (int) round ((double) ntasks * (((double) work0) / ((double) total_work))) ; // ensure at least one task is assigned to each partition ntasks0 = GB_IMAX (ntasks0, 1) ; ntasks0 = GB_IMIN (ntasks0, ntasks-1) ; int ntasks1 = ntasks - ntasks0 ; //---------------------------------------------------------------------- // assign ntasks0 to the first half //---------------------------------------------------------------------- // ntasks0 tasks merge L [pL_start...pleft-1] and R [pR_start..pright-1] // into the result S [pS_start...work0-1]. GB_msort_1_create_merge_tasks ( L_task, L_len, R_task, R_len, S_task, t0, ntasks0, pS_start, L_0, pL_start, pleft, R_0, pR_start, pright) ; //---------------------------------------------------------------------- // assign ntasks1 to the second half //---------------------------------------------------------------------- // ntasks1 tasks merge L [pleft...pL_end-1] and R [pright...pR_end-1] // into the result S [pS_start+work0...pS_start+total_work]. 
int t1 = t0 + ntasks0 ; // first task id of the second set of tasks int64_t pS_start1 = pS_start + work0 ; // 2nd set starts here in S GB_msort_1_create_merge_tasks ( L_task, L_len, R_task, R_len, S_task, t1, ntasks1, pS_start1, L_0, pleft, pL_end, R_0, pright, pR_end) ; } } //------------------------------------------------------------------------------ // GB_msort_1_merge: merge two sorted lists via a single thread //------------------------------------------------------------------------------ // merge Left [0..nleft-1] and Right [0..nright-1] into S [0..nleft+nright-1] */ static void GB_msort_1_merge ( int64_t *restrict S_0, // output of length nleft + nright const int64_t *restrict Left_0, // left input of length nleft const int64_t nleft, const int64_t *restrict Right_0, // right input of length nright const int64_t nright ) { int64_t p, pleft, pright ; // merge the two inputs, Left and Right, while both inputs exist for (p = 0, pleft = 0, pright = 0 ; pleft < nleft && pright < nright ; p++) { if (GB_lt_1 (Left_0, pleft, Right_0, pright)) { // S [p] = Left [pleft++] S_0 [p] = Left_0 [pleft] ; pleft++ ; } else { // S [p] = Right [pright++] S_0 [p] = Right_0 [pright] ; pright++ ; } } // either input is exhausted; copy the remaining list into S if (pleft < nleft) { int64_t nremaining = (nleft - pleft) ; memcpy (S_0 + p, Left_0 + pleft, nremaining * sizeof (int64_t)) ; } else if (pright < nright) { int64_t nremaining = (nright - pright) ; memcpy (S_0 + p, Right_0 + pright, nremaining * sizeof (int64_t)) ; } } //------------------------------------------------------------------------------ // GB_msort_1: parallel mergesort //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info GB_msort_1 // sort array A of size 1-by-n ( int64_t *restrict A_0, // size n array const int64_t n, int nthreads // # of threads to use ) { //-------------------------------------------------------------------------- // handle small problems with 
a single thread //-------------------------------------------------------------------------- if (nthreads <= 1 || n <= GB_BASECASE) { // sequential quicksort GB_qsort_1 (A_0, n) ; return (GrB_SUCCESS) ; } //-------------------------------------------------------------------------- // determine # of tasks //-------------------------------------------------------------------------- // determine the number of levels to create, which must always be an // even number. The # of levels is chosen to ensure that the # of leaves // of the task tree is between 4*nthreads and 16*nthreads. // 2 to 4 threads: 4 levels, 16 qsort leaves // 5 to 16 threads: 6 levels, 64 qsort leaves // 17 to 64 threads: 8 levels, 256 qsort leaves // 65 to 256 threads: 10 levels, 1024 qsort leaves // 256 to 1024 threads: 12 levels, 4096 qsort leaves // ... int k = (int) (2 + 2 * ceil (log2 ((double) nthreads) / 2)) ; int ntasks = 1 << k ; //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- int64_t *restrict W = NULL ; size_t W_size = 0 ; W = GB_MALLOC_WORK (n + 6*ntasks + 1, int64_t, &W_size) ; if (W == NULL) { // out of memory return (GrB_OUT_OF_MEMORY) ; } int64_t *T = W ; int64_t *restrict W_0 = T ; T += n ; int64_t *restrict L_task = T ; T += ntasks ; int64_t *restrict L_len = T ; T += ntasks ; int64_t *restrict R_task = T ; T += ntasks ; int64_t *restrict R_len = T ; T += ntasks ; int64_t *restrict S_task = T ; T += ntasks ; int64_t *restrict Slice = T ; T += (ntasks+1) ; //-------------------------------------------------------------------------- // partition and sort the leaves //-------------------------------------------------------------------------- GB_eslice (Slice, n, ntasks) ; int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { int64_t leaf = Slice [tid] ; int64_t leafsize = Slice [tid+1] - leaf ; 
GB_qsort_1 (A_0 + leaf, leafsize) ; } //-------------------------------------------------------------------------- // merge each level //-------------------------------------------------------------------------- int nt = 1 ; for ( ; k >= 2 ; k -= 2) { //---------------------------------------------------------------------- // merge level k into level k-1, from A into W //---------------------------------------------------------------------- // TODO: skip k and k-1 for each group of 4 sublists of A if they are // already sorted with respect to each other. // this could be done in parallel if ntasks was large for (int tid = 0 ; tid < ntasks ; tid += 2*nt) { // create 2*nt tasks to merge two A sublists into one W sublist GB_msort_1_create_merge_tasks ( L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid], A_0, Slice [tid], Slice [tid+nt], A_0, Slice [tid+nt], Slice [tid+2*nt]) ; } #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { // merge A [pL...pL+nL-1] and A [pR...pR+nR-1] into W [pS..] int64_t pL = L_task [tid], nL = L_len [tid] ; int64_t pR = R_task [tid], nR = R_len [tid] ; int64_t pS = S_task [tid] ; GB_msort_1_merge ( W_0 + pS, A_0 + pL, nL, A_0 + pR, nR) ; } nt = 2*nt ; //---------------------------------------------------------------------- // merge level k-1 into level k-2, from W into A //---------------------------------------------------------------------- // this could be done in parallel if ntasks was large for (int tid = 0 ; tid < ntasks ; tid += 2*nt) { // create 2*nt tasks to merge two W sublists into one A sublist GB_msort_1_create_merge_tasks ( L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid], W_0, Slice [tid], Slice [tid+nt], W_0, Slice [tid+nt], Slice [tid+2*nt]) ; } #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { // merge A [pL...pL+nL-1] and A [pR...pR+nR-1] into W [pS..] 
int64_t pL = L_task [tid], nL = L_len [tid] ; int64_t pR = R_task [tid], nR = R_len [tid] ; int64_t pS = S_task [tid] ; GB_msort_1_merge ( A_0 + pS, W_0 + pL, nL, W_0 + pR, nR) ; } nt = 2*nt ; } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_WORK (&W, W_size) ; return (GrB_SUCCESS) ; }
critical.c
#include <omp.h> int main (void) { int a=0,b=0,c =0; #pragma omp parallel { #pragma omp critical (aaa) a=a+1; #pragma omp critical (bbb) b=b+1; #pragma omp critical c=c+1; } }
GB_unaryop__identity_uint16_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint16_uint16
// op(A') function:  GB_tran__identity_uint16_uint16

// C type:   uint16_t
// A type:   uint16_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    uint16_t z = (uint16_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the identity operator to each of the anz entries of Ax, writing the
// result into Cx, in parallel.  Returns GrB_NO_VALUE if this kernel has been
// compiled out via GB_DISABLE (the generic kernel is used instead).

GrB_Info GB_unop__identity_uint16_uint16
(
    uint16_t *Cx,       // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // p is a signed 64-bit index so the OpenMP loop can parallelize it
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The body of this kernel lives in the shared template GB_unaryop_transpose.c,
// which is specialized here via the GB_* macros defined above.

GrB_Info GB_tran__identity_uint16_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__identity_uint32_int32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_uint32_int32
// op(A') function:  GB_unop_tran__identity_uint32_int32

// C type:   uint32_t
// A type:   int32_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint32_t z = (uint32_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)               \
{                                       \
    /* aij = Ax [pA] */                 \
    int32_t aij = Ax [pA] ;             \
    /* Cx [pC] = op (cast (aij)) */     \
    uint32_t z = (uint32_t) aij ;       \
    Cx [pC] = z ;                       \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Casts each of the anz int32 entries of Ax to uint32 and stores it in Cx,
// in parallel.  Returns GrB_NO_VALUE if this kernel has been compiled out
// via GB_DISABLE (the generic kernel is used instead).

GrB_Info GB_unop_apply__identity_uint32_int32
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const int32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // p is a signed 64-bit index so the OpenMP loop can parallelize it
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int32_t aij = Ax [p] ;
        uint32_t z = (uint32_t) aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The body of this kernel lives in the shared template GB_unop_transpose.c,
// which is specialized here via the GB_* macros defined above.

GrB_Info GB_unop_tran__identity_uint32_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
imginputfileconn.h
/**
 * DeepDetect
 * Copyright (c) 2014 Emmanuel Benazera
 * Author: Emmanuel Benazera <beniz@droidnik.fr>
 *
 * This file is part of deepdetect.
 *
 * deepdetect is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * deepdetect is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with deepdetect.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef IMGINPUTFILECONN_H
#define IMGINPUTFILECONN_H

#include "inputconnectorstrategy.h"
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "ext/base64/base64.h"
#include "utils/apitools.h"
#include <random>

namespace dd
{
  /**
   * \brief image decoding/loading helper: reads images from raw bytes,
   *        files, or directories, applying resize/scale/crop per the
   *        configured parameters, and accumulates them into _imgs.
   */
  class DDImg
  {
  public:
    DDImg() {}
    ~DDImg() {}

    // base64 detection

    /**
     * \brief whether c belongs to the base64 alphabet (incl. '+', '/', '=')
     */
    bool is_within_base64_range(char c) const
    {
      if ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z')
	  || (c >= '0' && c <= '9') || (c == '+' || c=='/' || c=='='))
	return true;
      else return false;
    }

    /**
     * \brief heuristic: s may be base64 if its length is a multiple of four
     *        and every character is in the base64 alphabet
     */
    bool possibly_base64(const std::string &s) const
    {
      bool ism = is_multiple_four(s);
      if (!ism)
	return false;
      for (char c: s)
	{
	  bool within_64 = is_within_base64_range(c);
	  if (!within_64)
	    return false;
	}
      return true;
    }

    bool is_multiple_four(const std::string &s) const
    {
      if (s.length() % 4 == 0)
	return true;
      else return false;
    }

    /**
     * \brief isotropic rescale of src into dst so that the shorter side
     *        approaches _scale_min without the longer side exceeding
     *        _scale_max
     */
    void scale(const cv::Mat &src, cv::Mat &dst) const
    {
      float coef = std::min(static_cast<float>(_scale_max) / std::max(src.rows, src.cols),
			    static_cast<float>(_scale_min) / std::min(src.rows, src.cols));
      cv::resize(src, dst, cv::Size(), coef, coef, CV_INTER_CUBIC);
    }

    // decode image

    /**
     * \brief decode an in-memory encoded image (any format cv::imdecode
     *        supports), then resize/scale/crop it per the configured
     *        parameters and append the result to _imgs
     */
    void decode(const std::string &str)
    {
      std::vector<unsigned char> vdat(str.begin(),str.end());
      cv::Mat img = cv::Mat(cv::imdecode(cv::Mat(vdat,true),
					 _unchanged_data ? CV_LOAD_IMAGE_UNCHANGED :
					 (_bw ? CV_LOAD_IMAGE_GRAYSCALE : CV_LOAD_IMAGE_COLOR)));
      _imgs_size.push_back(std::pair<int,int>(img.rows,img.cols));
      cv::Mat rimg;
      if (_scaled)
	scale(img, rimg);
      else if (_width == 0 || _height == 0)
	{
	  if (_width == 0 && _height == 0)
	    {
	      // XXX - Do nothing and keep native resolution. May cause issues if batched images are different resolutions
	      rimg = img;
	    }
	  else
	    {
	      // Resize so that the larger dimension is set to whichever (width or height) is non-zero, maintaining aspect ratio
	      // XXX - This may cause issues if batch images are different resolutions
	      size_t currMaxDim = std::max(img.rows, img.cols);
	      double scale = static_cast<double>(std::max(_width, _height)) / static_cast<double>(currMaxDim);
	      cv::resize(img,rimg,cv::Size(),scale,scale,CV_INTER_CUBIC);
	    }
	}
      else
	{
	  // Resize normally to the specified width and height
	  cv::resize(img,rimg,cv::Size(_width,_height),0,0,CV_INTER_CUBIC);
	}
      if (_crop_width != 0 && _crop_height != 0)
	{
	  // center crop of _crop_width x _crop_height out of the resized image
	  int widthBorder = (_width - _crop_width)/2;
	  int heightBorder = (_height - _crop_height)/2;
	  rimg = rimg(cv::Rect(widthBorder, heightBorder, _crop_width, _crop_height));
	}
      _imgs.push_back(rimg);
    }

    // deserialize image, independent of format

    /**
     * \brief read the full stream content into a string and decode it
     */
    void deserialize(std::stringstream &input)
    {
      size_t size = 0;
      input.seekg(0,input.end);
      size = input.tellg();
      input.seekg(0,input.beg);
      char* data = new char[size];
      input.read(data, size);
      std::string str(data,data+size);
      delete[]data;
      decode(str);
    }

    // data acquisition

    /**
     * \brief load an image from disk, resize/scale/crop per the configured
     *        parameters and append it to _imgs
     * \return 0 on success, -1 if the file could not be read as an image
     */
    int read_file(const std::string &fname)
    {
      cv::Mat img = cv::imread(fname, _unchanged_data ? CV_LOAD_IMAGE_UNCHANGED :
			       (_bw ? CV_LOAD_IMAGE_GRAYSCALE : CV_LOAD_IMAGE_COLOR));
      if (img.empty())
	{
	  _logger->error("empty image {}",fname);
	  return -1;
	}
      _imgs_size.push_back(std::pair<int,int>(img.rows,img.cols));
      cv::Mat rimg;
      try
	{
	  if (_scaled)
	    scale(img, rimg);
	  else if (_width == 0 || _height == 0)
	    {
	      if (_width == 0 && _height == 0)
		{
		  // Do nothing and keep native resolution. May cause issues if batched images are different resolutions
		  rimg = img;
		}
	      else
		{
		  // Resize so that the larger dimension is set to whichever (width or height) is non-zero, maintaining aspect ratio
		  // XXX - This may cause issues if batch images are different resolutions
		  size_t currMaxDim = std::max(img.rows, img.cols);
		  double scale = static_cast<double>(std::max(_width, _height)) / static_cast<double>(currMaxDim);
		  cv::resize(img,rimg,cv::Size(),scale,scale,CV_INTER_CUBIC);
		}
	    }
	  else
	    {
	      // Resize normally to the specified width and height
	      cv::resize(img,rimg,cv::Size(_width,_height),0,0,CV_INTER_CUBIC);
	    }
	}
      catch(...)
	{
	  throw InputConnectorBadParamException("failed resizing image " + fname);
	}
      if (_crop_width != 0 && _crop_height != 0)
	{
	  int widthBorder = (_width - _crop_width)/2;
	  int heightBorder = (_height - _crop_height)/2;
	  try
	    {
	      rimg = rimg(cv::Rect(widthBorder, heightBorder, _crop_width, _crop_height));
	    }
	  catch(...)
	    {
	      throw InputConnectorBadParamException("failed cropping image " + fname);
	    }
	}
      _imgs.push_back(rimg);
      return 0;
    }

    /**
     * \brief record the db filename; actual reading is left to the backend
     */
    int read_db(const std::string &fname)
    {
      _db_fname = fname;
      return 0;
    }

    /**
     * \brief decode an image held in memory, auto-detecting base64 encoding
     * \return 0 on success, -1 if decoding produced an empty image
     */
    int read_mem(const std::string &content)
    {
      cv::Mat timg;
      _b64 = possibly_base64(content);
      if (_b64)
	{
	  std::string ccontent;
	  Base64::Decode(content,&ccontent);
	  std::stringstream sstr;
	  sstr << ccontent;
	  deserialize(sstr);
	}
      else
	{
	  decode(content);
	}
      if (_imgs.at(0).empty())
	return -1;
      return 0;
    }

    /**
     * \brief read a directory of images; each subdirectory defines a class
     *        (labels 0..n-1 in subdirectory iteration order); a flat
     *        directory yields unlabeled images (label -1 is not stored)
     */
    int read_dir(const std::string &dir)
    {
      // list directories in dir
      std::unordered_set<std::string> subdirs;
      if (fileops::list_directory(dir,false,true,false,subdirs))
	throw InputConnectorBadParamException("failed reading text subdirectories in data directory " + dir);
      _logger->info("imginputfileconn: list subdirs size={}",subdirs.size());

      // list files and classes
      std::vector<std::pair<std::string,int>> lfiles; // labeled files
      std::unordered_map<int,std::string> hcorresp; // correspondence class number / class name
      if (!subdirs.empty())
	{
	  int cl = 0;
	  auto uit = subdirs.begin();
	  while(uit!=subdirs.end())
	    {
	      std::unordered_set<std::string> subdir_files;
	      if (fileops::list_directory((*uit),true,false,true,subdir_files))
		throw InputConnectorBadParamException("failed reading image data sub-directory " + (*uit));
	      auto fit = subdir_files.begin();
	      while(fit!=subdir_files.end()) // XXX: re-iterating the file is not optimal
		{
		  lfiles.push_back(std::pair<std::string,int>((*fit),cl));
		  ++fit;
		}
	      ++cl;
	      ++uit;
	    }
	}
      else
	{
	  std::unordered_set<std::string> test_files;
	  fileops::list_directory(dir,true,false,false,test_files);
	  auto fit = test_files.begin();
	  while(fit!=test_files.end())
	    {
	      lfiles.push_back(std::pair<std::string,int>((*fit),-1)); // -1 for no class
	      ++fit;
	    }
	}

      // read images
      _imgs.reserve(lfiles.size());
      _img_files.reserve(lfiles.size());
      _labels.reserve(lfiles.size());
      for (std::pair<std::string,int> &p: lfiles)
	{
	  cv::Mat img = cv::imread(p.first, _unchanged_data ? CV_LOAD_IMAGE_UNCHANGED :
				   (_bw ? CV_LOAD_IMAGE_GRAYSCALE : CV_LOAD_IMAGE_COLOR));
	  _imgs_size.push_back(std::pair<int,int>(img.rows,img.cols));
	  cv::Mat rimg;
	  try
	    {
	      if (_scaled)
		scale(img, rimg);
	      else if (_width == 0 || _height == 0)
		{
		  if (_width == 0 && _height == 0)
		    {
		      // Do nothing and keep native resolution. May cause issues if batched images are different resolutions
		      rimg = img;
		    }
		  else
		    {
		      // Resize so that the larger dimension is set to whichever (width or height) is non-zero, maintaining aspect ratio
		      // XXX - This may cause issues if batch images are different resolutions
		      size_t currMaxDim = std::max(img.rows, img.cols);
		      double scale = static_cast<double>(std::max(_width, _height)) / static_cast<double>(currMaxDim);
		      cv::resize(img,rimg,cv::Size(),scale,scale,CV_INTER_CUBIC);
		    }
		}
	      else
		{
		  // Resize normally to the specified width and height
		  cv::resize(img,rimg,cv::Size(_width,_height),0,0,CV_INTER_CUBIC);
		}
	    }
	  catch(...)
	    {
	      throw InputConnectorBadParamException("failed resizing image " + p.first);
	    }
	  if (_crop_width != 0 && _crop_height != 0)
	    {
	      int widthBorder = (_width - _crop_width)/2;
	      int heightBorder = (_height - _crop_height)/2;
	      try
		{
		  rimg = rimg(cv::Rect(widthBorder, heightBorder, _crop_width, _crop_height));
		}
	      catch(...)
		{
		  throw InputConnectorBadParamException("failed cropping image " + p.first);
		}
	    }
	  _imgs.push_back(rimg);
	  _img_files.push_back(p.first);
	  if (p.second >= 0)
	    _labels.push_back(p.second);
	  if (_imgs.size() % 1000 == 0)
	    _logger->info("read {} images",_imgs.size());
	}
      return 0;
    }

    std::vector<cv::Mat> _imgs;                    // decoded images
    std::vector<std::string> _img_files;           // source file paths (read_dir)
    std::vector<std::pair<int,int>> _imgs_size;    // original (rows, cols) per image
    bool _bw = false;                              // load as grayscale
    bool _b64 = false;                             // last read_mem input was base64
    bool _unchanged_data = false;                  // load with IMREAD_UNCHANGED
    std::vector<int> _labels;                      // class labels (read_dir)
    int _width = 224;                              // resize target width
    int _height = 224;                             // resize target height
    int _crop_width = 0;                           // center-crop width (0 = off)
    int _crop_height = 0;                          // center-crop height (0 = off)
    bool _scaled = false;                          // use min/max isotropic scaling
    int _scale_min = 600;
    int _scale_max = 1000;
    std::string _db_fname;                         // db filename, if input is a db
    std::shared_ptr<spdlog::logger> _logger;
  };

  /**
   * \brief input connector for images: reads the API parameters, loads the
   *        images (possibly in parallel), and optionally shuffles and splits
   *        them into train/test sets.
   */
  class ImgInputFileConn : public InputConnectorStrategy
  {
  public:
    ImgInputFileConn()
      :InputConnectorStrategy(){}
    ImgInputFileConn(const ImgInputFileConn &i)
      :InputConnectorStrategy(i),
       _width(i._width),_height(i._height),
       _crop_width(i._crop_width),_crop_height(i._crop_height),
       _bw(i._bw),_unchanged_data(i._unchanged_data),
       _mean(i._mean),_has_mean_scalar(i._has_mean_scalar),
       _scaled(i._scaled), _scale_min(i._scale_min), _scale_max(i._scale_max) {}
    ~ImgInputFileConn() {}

    void init(const APIData &ad)
    {
      fillup_parameters(ad);
    }

    /**
     * \brief read the optional image parameters from the API data; crop
     *        dimensions must not exceed the resize dimensions
     */
    void fillup_parameters(const APIData &ad)
    {
      // optional parameters.
      if (ad.has("width"))
	_width = ad.get("width").get<int>();
      if (ad.has("height"))
	_height = ad.get("height").get<int>();
      if (ad.has("crop_width"))
	{
	  _crop_width = ad.get("crop_width").get<int>();
	  if (_crop_width > _width)
	    {
	      _logger->error("Crop width must be less than or equal to width");
	      throw InputConnectorBadParamException("Crop width must be less than or equal to width");
	    }
	}
      if (ad.has("crop_height"))
	{
	  _crop_height = ad.get("crop_height").get<int>();
	  if (_crop_height > _height)
	    {
	      _logger->error("Crop height must be less than or equal to height");
	      throw InputConnectorBadParamException("Crop height must be less than or equal to height");
	    }
	}
      if (ad.has("bw"))
	_bw = ad.get("bw").get<bool>();
      if (ad.has("unchanged_data"))
	_unchanged_data = ad.get("unchanged_data").get<bool>();
      // NOTE(review): _shuffle appears to be declared in the
      // InputConnectorStrategy base class -- it is not a member here.
      if (ad.has("shuffle"))
	_shuffle = ad.get("shuffle").get<bool>();
      if (ad.has("seed"))
	_seed = ad.get("seed").get<int>();
      if (ad.has("test_split"))
	_test_split = ad.get("test_split").get<double>();
      if (ad.has("mean"))
	{
	  apitools::get_floats(ad, "mean", _mean);
	  _has_mean_scalar = true;
	}
      // Variable size
      if (ad.has("scaled") || ad.has("scale_min") || ad.has("scale_max"))
	_scaled = true;
      if (ad.has("scale_min"))
	_scale_min = ad.get("scale_min").get<int>();
      if (ad.has("scale_max"))
	_scale_max = ad.get("scale_max").get<int>();
    }

    /**
     * \brief number of values per image: (cropped or resized) W x H,
     *        times 3 channels unless bw/unchanged
     */
    int feature_size() const
    {
      if (_bw || _unchanged_data)
	{
	  // XXX: only valid for single channels
	  if (_crop_width != 0 && _crop_height != 0)
	    return _crop_width*_crop_height;
	  else
	    return _width*_height;
	}
      else
	{
	  // RGB
	  if (_crop_width != 0 && _crop_height != 0)
	    return _crop_width*_crop_height*3;
	  else
	    return _width*_height*3;
	}
    }

    int batch_size() const
    {
      return _images.size();
    }

    int test_batch_size() const
    {
      return _test_images.size();
    }

    /**
     * \brief main entry point: reads every URI in parallel into _images,
     *        then optionally shuffles and splits into train/test sets;
     *        throws InputConnectorBadParamException on read failure or
     *        if no image could be loaded
     */
    void transform(const APIData &ad)
    {
      get_data(ad);

      if (ad.has("parameters")) // hotplug of parameters, overriding the defaults
	{
	  APIData ad_param = ad.getobj("parameters");
	  if (ad_param.has("input"))
	    {
	      fillup_parameters(ad_param.getobj("input"));
	    }
	}

      int catch_read = 0;
      std::string catch_msg;
      std::vector<std::string> uris;
      std::vector<std::string> failed_uris;
#pragma omp parallel for
      for (size_t i=0;i<_uris.size();i++)
	{
	  bool no_img = false;
	  std::string u = _uris.at(i);
	  DataEl<DDImg> dimg;
	  // propagate the connector's parameters to the per-URI reader
	  dimg._ctype._bw = _bw;
	  dimg._ctype._unchanged_data = _unchanged_data;
	  dimg._ctype._width = _width;
	  dimg._ctype._height = _height;
	  dimg._ctype._crop_width = _crop_width;
	  dimg._ctype._crop_height = _crop_height;
	  dimg._ctype._scaled = _scaled;
	  dimg._ctype._scale_min = _scale_min;
	  dimg._ctype._scale_max = _scale_max;
	  try
	    {
	      if (dimg.read_element(u,this->_logger))
		{
		  _logger->error("no data for image {}",u);
		  no_img = true;
		}
	      if (!dimg._ctype._db_fname.empty())
		_db_fname = dimg._ctype._db_fname;
	    }
	  catch(std::exception &e)
	    {
	      // errors are collected and reported after the parallel loop
#pragma omp critical
	      {
		++catch_read;
		catch_msg = e.what();
		failed_uris.push_back(u);
		no_img = true;
	      }
	    }
	  if (no_img)
	    continue;
	  if (!_db_fname.empty())
	    continue;

	  // merge this URI's images into the shared output vectors
#pragma omp critical
	  {
	    _images.insert(_images.end(),
			   std::make_move_iterator(dimg._ctype._imgs.begin()),
			   std::make_move_iterator(dimg._ctype._imgs.end()));
	    _images_size.insert(_images_size.end(),
				std::make_move_iterator(dimg._ctype._imgs_size.begin()),
				std::make_move_iterator(dimg._ctype._imgs_size.end()));
	    if (!dimg._ctype._labels.empty())
	      _test_labels.insert(_test_labels.end(),
				  std::make_move_iterator(dimg._ctype._labels.begin()),
				  std::make_move_iterator(dimg._ctype._labels.end()));
	    if (!dimg._ctype._b64 && dimg._ctype._imgs.size() == 1)
	      uris.push_back(u);
	    else if (!dimg._ctype._img_files.empty())
	      uris.insert(uris.end(),
			  std::make_move_iterator(dimg._ctype._img_files.begin()),
			  std::make_move_iterator(dimg._ctype._img_files.end()));
	    else uris.push_back(std::to_string(i));
	  }
	}
      if (catch_read)
	{
	  for (auto s: failed_uris)
	    _logger->error("failed reading image {}",s);
	  throw InputConnectorBadParamException(catch_msg);
	}
      _uris = uris;
      if (!_db_fname.empty())
	return; // db filename is passed to backend

      // shuffle before possible split
      if (_shuffle)
	{
	  std::mt19937 g;
	  if (_seed >= 0)
	    g = std::mt19937(_seed);
	  else
	    {
	      std::random_device rd;
	      g = std::mt19937(rd());
	    }
	  std::shuffle(_images.begin(),_images.end(),g);
	  //XXX beware: labels are not shuffled, i.e. let's not shuffle while testing
	}

      // split as required
      if (_test_split > 0)
	{
	  // everything past split_size moves to the test set
	  int split_size = std::floor(_images.size() * (1.0-_test_split));
	  auto chit = _images.begin();
	  auto dchit = chit;
	  int cpos = 0;
	  while(chit!=_images.end())
	    {
	      if (cpos == split_size)
		{
		  if (dchit == _images.begin())
		    dchit = chit;
		  _test_images.push_back((*chit));
		}
	      else ++cpos;
	      ++chit;
	    }
	  _images.erase(dchit,_images.end());
	  _logger->info("data split test size={} / remaining data size={}",_test_images.size(),_images.size());
	}
      if (_images.empty())
	throw InputConnectorBadParamException("no image could be found");
    }

    // data
    std::vector<cv::Mat> _images;
    std::vector<cv::Mat> _test_images;
    std::vector<int> _test_labels;
    std::vector<std::pair<int,int>> _images_size;

    // image parameters
    int _width = 224;
    int _height = 224;
    int _crop_width = 0;
    int _crop_height = 0;
    bool _bw = false; /**< whether to convert to black & white. */
    bool _unchanged_data = false; /**< IMREAD_UNCHANGED flag. */
    double _test_split = 0.0; /**< auto-split of the dataset. */
    int _seed = -1; /**< shuffling seed. */
    std::vector<float> _mean; /**< mean image pixels, to be subtracted from images. */
    bool _has_mean_scalar = false; /**< whether scalar is set. */
    std::string _db_fname;
    bool _scaled = false;
    int _scale_min = 600;
    int _scale_max = 1000;
  };
}

#ifdef USE_CAFFE
#include "caffeinputconns.h"
#endif

#ifdef USE_TF
#include "backends/tf/tfinputconns.h"
#endif

#ifdef USE_DLIB
#include "backends/dlib/dlibinputconns.h"
#endif

#ifdef USE_NCNN
#include "backends/ncnn/ncnninputconns.h"
#endif

#ifdef USE_CAFFE2
#include "backends/caffe2/caffe2inputconns.h"
#endif

#endif
timer.h
#ifndef timer_h
#define timer_h
#include <iomanip>
#include <iostream>
#include <map>
#include <string>
#include <sys/time.h>
#include <unistd.h>
namespace exafmm_t {
  static const int stringLength = 20;                          //!< Length of formatted string
  static const int decimal = 7;                                //!< Decimal precision
  static const int wait = 100;                                 //!< Waiting time between output of different ranks
  static const int dividerLength = stringLength + decimal + 9; //!< Length of output section divider

  // NOTE(review): these are non-static, non-inline definitions in a header;
  // including this file from more than one translation unit would produce
  // multiple-definition link errors. Presumably the project includes it from
  // exactly one TU — confirm before reusing elsewhere.
  long long flop = 0;                    //!< global FLOP counter, updated via add_flop()
  timeval time;                          //!< scratch timestamp shared by start()/stop(); not thread-safe
  std::map<std::string, timeval> timer;  //!< per-event start timestamps

  //! Print a section header of the form "--- name ----------".
  void print(std::string s) {
    // if (!VERBOSE | (MPIRANK != 0)) return;
    s += " ";
    std::cout << "--- " << std::setw(stringLength) << std::left
              << std::setfill('-') << s
              << std::setw(decimal+1) << "-"
              << std::setfill(' ') << std::endl;
  }

  //! Print a labelled value.
  //! With fixed == true (default): fixed-point notation with `decimal` digits.
  //! With fixed == false: scientific notation with one digit of precision.
  template<typename T>
  void print(std::string s, T v, bool fixed=true) {
    std::cout << std::setw(stringLength) << std::left << s << " : ";
    if(fixed)
      // Bug fix: the previous code appended "<< std::scientific" here, which
      // overrode std::fixed (the last floatfield manipulator wins), so the
      // `fixed` flag had no effect.
      std::cout << std::setprecision(decimal) << std::fixed;
    else
      std::cout << std::setprecision(1) << std::scientific;
    std::cout << v << std::endl;
  }

  //! Print a divider line with `s` centred in it.
  void print_divider(std::string s) {
    s.insert(0, " ");
    s.append(" ");
    int halfLength = (dividerLength - s.length()) / 2;
    std::cout << std::string(halfLength, '-') << s
              << std::string(dividerLength-halfLength-s.length(), '-') << std::endl;
  }

  //! Atomically add n to the global FLOP counter (safe inside OpenMP regions).
  void add_flop(long long n) {
#pragma omp atomic update
    flop += n;
  }

  //! Record the wall-clock start time of `event`.
  void start(std::string event) {
    gettimeofday(&time, NULL);
    timer[event] = time;
  }

  //! Return seconds elapsed since start(event); optionally print it.
  double stop(std::string event, bool verbose=true) {
    gettimeofday(&time, NULL);
    double eventTime = time.tv_sec - timer[event].tv_sec
                     + (time.tv_usec - timer[event].tv_usec) * 1e-6;
    if (verbose) print(event, eventTime);
    return eventTime;
  }
}
#endif
imp_dim_red_parallel.c
// ------------------------------------------------------------------------ // // This file is part of SDRcausal. // // SDRcausal is free software: you can redistribute it and/or modify it // under the terms of the GNU General Public License as published by the // Free Software Foundation, either version 3 of the License, or (at your // option) any later version. // // SDRcausal is distributed in the hope that it will be useful, but WITHOUT // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License // for more details. // // You should have received a copy of the GNU General Public License along // with SDRcausal. If not, see <https://www.gnu.org/licenses/>. // // ------------------------------------------------------------------------ // #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include "omp_test.h" #include "matrix_utilities.h" #include "imp_llr.h" #include "nw_kernel_regress.h" #include "imp_dim_red.h" void imp_dim_red (int n, int p, int d, const double x[n*p], const double x_beta[n*d], const double y[n], const int treated[n], int kernel_spec, double h0, double h11, double h12, double h13, double h14, double gauss_cutoff, int n_threads, int *n_llr_fail, double (*y_out)[d*(p-d)]) { // Creating lower n x (p - d) submatrix of x int p0 = p - d; double x_lower[n*p0]; for (int i=0; i<n; i++) for (int j=d; j<p; j++) x_lower[i*p0 + (j-d)] = x[i*p + j]; double y_out_tmp[d*p0]; for (int i=0; i<d*p0; i++) y_out_tmp[i] = 0; // Storing n_nans int n_nan_tmp = 0; // Calculating eq 2.4 in paper excluding the summation over i #if defined(_OPENMP) #pragma omp parallel for schedule(static) num_threads(n_threads) #endif for (int i=0; i<n; i++) { if (treated[i]) { double x0[d]; for (int j=0; j<d; j++) x0[j] = x_beta[i*d + j]; double m, dm[d]; imp_llr ( n, d, x_beta, x0, y, treated, kernel_spec, h11, h12, h13, h14, gauss_cutoff, &m, &dm); int nan_encountered = 0; 
for (int j=0; j<d; j++) if (isnan (dm[j])) { #if defined(_OPENMP) #pragma omp atomic #endif n_nan_tmp++; nan_encountered = 1; } if (nan_encountered) continue; double x_distance[n]; for (int j=0; j<n; j++) { double delta_x = 0; for (int k=0; k<d; k++) delta_x += pow (x_beta[j*d + k] - x0[k], 2); x_distance[j] = sqrt (delta_x); } double x_lower_estimate[p0]; nw_kernel_regress ( n, p0, x_distance, x_lower, kernel_spec, h0, gauss_cutoff, &x_lower_estimate); if (d == 1) { double tmp[p0]; for (int j=0; j<p0; j++) tmp[j] = dm[0] * (x_lower[i*p0 + j] - x_lower_estimate[j]); // Row of eq 2.4 before summation for (int j=0; j<p0; j++) { #if defined(_OPENMP) #pragma omp atomic #endif y_out_tmp[j] += (y[i] - m) * tmp[j]; } } else { double tmp1[p0], tmp2[d*p0]; for (int j=0; j<p0; j++) tmp1[j] = x_lower[i*p0 + j] - x_lower_estimate[j]; matrix_multiplication ( d, 1, p0, dm, tmp1, &tmp2); for (int j=0; j<d*p0; j++) (*y_out)[j] += (y[i] - m) * tmp2[j]; } } } memmove (y_out, y_out_tmp, d*p0*sizeof (double)); *n_llr_fail = n_nan_tmp; return; }
oyranos_cmm_oyra_image_ppm.c
/** @file oyranos_cmm_oyra_image.c
 *
 *  Oyranos is an open source Color Management System
 *
 *  @par Copyright:
 *            2008-2015 (C) Kai-Uwe Behrmann
 *
 *  @brief    modules for Oyranos
 *  @internal
 *  @author   Kai-Uwe Behrmann <ku.b@gmx.de>
 *  @par License:
 *            new BSD <http://www.opensource.org/licenses/BSD-3-Clause>
 *  @since    2008/10/07
 */

#include "oyCMMapi4_s.h"
#include "oyCMMapi4_s_.h"
#include "oyCMMapi7_s.h"
#include "oyCMMapi7_s_.h"
#include "oyCMMapiFilters_s.h"
#include "oyCMMui_s_.h"
#include "oyConnectorImaging_s_.h"
#include "oyFilterNode_s_.h" /* for oyFilterNode_TextToInfo_ */
#include "oyRectangle_s_.h"

#include "oyranos_config_internal.h"
#include "oyranos_cmm.h"
#include "oyranos_cmm_oyra.h"
#include "oyranos_generic.h" /* oy_connector_imaging_static_object */
#include "oyranos_helper.h"
#include "oyranos_icc.h"
#include "oyranos_i18n.h"
#include "oyranos_io.h"
#include "oyranos_definitions.h"
#include "oyranos_string.h"
#include "oyranos_texts.h"

#include <math.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

/* 16-bit storage type used for PFM half-float samples. */
typedef uint16_t half;

/* Scan forward in `data` for the next whitespace-delimited word;
 * see definition further down in this file. */
int wread ( unsigned char *data, /* read a word */
            size_t pos,
            size_t max,
            size_t *start,
            size_t *length );

/** Validate options for the PPM writer: only requires that a "filename"
 *  option is present. Writes 0/1 into *result (1 = invalid) and returns 0. */
oyOptions_s* oyraFilter_ImageOutputPPMValidateOptions
                                     ( oyFilterCore_s    * filter,
                                       oyOptions_s       * validate,
                                       int                 statical OY_UNUSED,
                                       uint32_t          * result )
{
  uint32_t error = !filter;

#if 0
  oyDATATYPE_e data_type = 0;
  int planar, channels;
  oyImage_s * image = 0;

  if(!error)
    filter = node->filter;

  if(!error)
    error = filter->type_ != oyOBJECT_FILTER_S;

  if(!error)
  {
    if(filter->image_ && filter->image_->layout_)
    {
      data_type = oyToDataType_m( filter->image_->layout_[0] );
      if(!(data_type == oyUINT8 ||
           data_type == oyUINT16 ||
           data_type == oyFLOAT ||
           data_type == oyDOUBLE ))
        error = 1;

      planar = oyToPlanar_m( filter->image_->layout_[0] );
      if(!error && planar)
        error = 1;

      channels = oyToChannels_m( filter->image_->layout_[0] );
      if(!error && channels > 4)
        error = 1;
    }
  }
#endif

  if(!error)
    error = !oyOptions_FindString( validate, "filename", 0 );

  *result = error;

  return 0;
}

/** @func    oyraFilterPlug_ImageOutputPPMWrite
 *  @brief   implement oyCMMFilter_GetNext_f()
 *
 *  Runs the upstream node, then writes the socket's image to the file named
 *  by the "filename" option via oyImage_WritePPM().
 *
 *  @version Oyranos: 0.3.1
 *  @since   2008/10/07 (Oyranos: 0.1.8)
 *  @date    2011/05/12
 */
int      oyraFilterPlug_ImageOutputPPMWrite (
                                       oyFilterPlug_s    * requestor_plug,
                                       oyPixelAccess_s   * ticket )
{
  oyFilterSocket_s * socket;
  oyFilterNode_s * node = 0;
  oyOptions_s * node_opts = 0;
  int result = 0;
  const char * filename = 0;
  FILE * fp = 0;

  socket = oyFilterPlug_GetSocket( requestor_plug );
  node = oyFilterSocket_GetNode( socket );
  node_opts = oyFilterNode_GetOptions( node, 0 );

  /* to reuse the requestor_plug is a exception for the starting request */
  if(node)
    result = oyFilterNode_Run( node, requestor_plug, ticket );
  else
    result = 1;

  if(result <= 0)
    filename = oyOptions_FindString( node_opts, "filename", 0 );

  if(filename)
    fp = fopen( filename, "wb" );

  if(fp)
  {
    oyImage_s *image_output = (oyImage_s*)oyFilterSocket_GetData( socket );
    const char * comment = oyOptions_FindString( node_opts, "comment", NULL );

    /* NOTE(review): the file is opened and closed here only to probe
     * writability (it also truncates an existing file); the actual writing
     * is done by oyImage_WritePPM() below — confirm that is intended. */
    fclose (fp); fp = 0;

    result = oyImage_WritePPM( image_output, filename,
                               comment ? comment : oyFilterNode_GetRelatives( node ) );
  }

  return result;
}

/* Default XFORMS option skeleton for the writer: "filename" and "comment". */
const char ppm_write_extra_options[] = {
 "\n\
 <" OY_TOP_SHARED ">\n\
 <" OY_DOMAIN_INTERNAL ">\n\
 <" OY_TYPE_STD ">\n\
 <" "file_write" ">\n\
 <filename></filename>\n\
 <comment></comment>\n\
 </" "file_write" ">\n\
 </" OY_TYPE_STD ">\n\
 </" OY_DOMAIN_INTERNAL ">\n\
 </" OY_TOP_SHARED ">\n" };

/** UI layout getter for the writer: returns an empty (4 spare bytes,
 *  zero-terminated) layout string allocated with allocateFunc. */
int  oyraPPMwriteUiGet               ( oyCMMapiFilter_s   * module OY_UNUSED,
                                       oyOptions_s       * opts OY_UNUSED,
                                       int                 flags OY_UNUSED,
                                       char             ** xforms_layout,
                                       oyAlloc_f           allocateFunc )
{
  char * text = (char*)allocateFunc(5);
  text[0] = 0;
  *xforms_layout = text;
  return 0;
}

/* Sample data types supported by the PPM connectors; zero-terminated list. */
oyDATATYPE_e oyra_image_ppm_data_types[6] = {oyUINT8, oyUINT16, oyHALF,
                                             oyFLOAT, oyDOUBLE, 0};

/* Output (socket) connector description of the PPM writer. */
oyConnectorImaging_s_ oyra_imageOutputPPM_connector_out = {
  oyOBJECT_CONNECTOR_IMAGING_S,0,0,
                               (oyObject_s)&oy_connector_imaging_static_object,
  oyCMMgetImageConnectorSocketText, /* getText */
  oy_image_connector_texts, /* texts */
  "//" OY_TYPE_STD "/image.data", /* connector_type */
  oyFilterSocket_MatchImagingPlug, /* filterSocket_MatchPlug */
  0, /* is_plug == oyFilterPlug_s */
  oyra_image_ppm_data_types,
  4, /* data_types_n; elements in data_types array */
  -1, /* max_color_offset */
  1, /* min_channels_count; */
  32, /* max_channels_count; */
  1, /* min_color_count; */
  32, /* max_color_count; */
  0, /* can_planar; can read separated channels */
  1, /* can_interwoven; can read continuous channels */
  0, /* can_swap; can swap color channels (BGR)*/
  0, /* can_swap_bytes; non host byte order */
  0, /* can_revert; revert 1 -> 0 and 0 -> 1 */
  1, /* can_premultiplied_alpha; */
  1, /* can_nonpremultiplied_alpha; */
  0, /* can_subpixel; understand subpixel order */
  0, /* oyCHANNELTYPE_e * channel_types; */
  0, /* count in channel_types */
  1, /* id; relative to oyFilter_s, e.g. 
1 */ 0 /* is_mandatory; mandatory flag */ }; oyConnectorImaging_s_ * oyra_imageOutputPPM_connectors_socket[2] = { &oyra_imageOutputPPM_connector_out, 0 }; oyConnectorImaging_s_ oyra_imageOutputPPM_connector_in = { oyOBJECT_CONNECTOR_IMAGING_S,0,0, (oyObject_s)&oy_connector_imaging_static_object, oyCMMgetImageConnectorPlugText, /* getText */ oy_image_connector_texts, /* texts */ "//" OY_TYPE_STD "/image.data", /* connector_type */ oyFilterSocket_MatchImagingPlug, /* filterSocket_MatchPlug */ 1, /* is_plug == oyFilterPlug_s */ oyra_image_ppm_data_types, 4, /* data_types_n; elements in data_types array */ -1, /* max_color_offset */ 1, /* min_channels_count; */ 4, /* max_channels_count; */ 1, /* min_color_count; */ 4, /* max_color_count; */ 0, /* can_planar; can read separated channels */ 1, /* can_interwoven; can read continuous channels */ 0, /* can_swap; can swap color channels (BGR)*/ 0, /* can_swap_bytes; non host byte order */ 0, /* can_revert; revert 1 -> 0 and 0 -> 1 */ 1, /* can_premultiplied_alpha; */ 1, /* can_nonpremultiplied_alpha; */ 0, /* can_subpixel; understand subpixel order */ 0, /* oyCHANNELTYPE_e * channel_types; */ 0, /* count in channel_types */ 2, /* id; relative to oyFilter_s, e.g. 1 */ 0 /* is_mandatory; mandatory flag */ }; oyConnectorImaging_s_ * oyra_imageOutputPPM_connectors_plug[2] = { &oyra_imageOutputPPM_connector_in, 0 }; /** * This function implements oyCMMGetText_f. 
* * @version Oyranos: 0.1.10 * @since 2009/12/22 (Oyranos: 0.1.10) * @date 2009/12/22 */ const char * oyraApi4ImageWriteUiGetText ( const char * select, oyNAME_e type, oyStruct_s * context OY_UNUSED ) { static char * category = 0; if(strcmp(select,"name") == 0) { if(type == oyNAME_NICK) return "write_ppm"; else if(type == oyNAME_NAME) return _("Image[write_ppm]"); else return _("Write PPM Image Filter Object"); } else if(strcmp(select,"category") == 0) { if(!category) { STRING_ADD( category, _("Files") ); STRING_ADD( category, _("/") ); STRING_ADD( category, _("Write PPM") ); } if(type == oyNAME_NICK) return "category"; else if(type == oyNAME_NAME) return category; else return category; } else if(strcmp(select,"help") == 0) { if(type == oyNAME_NICK) return "help"; else if(type == oyNAME_NAME) return _("Option \"filename\", a valid filename"); else return _("The Option \"filename\" should contain a valid filename to write the ppm data into. A existing file will be overwritten without notice."); } return 0; } const char * oyra_api4_image_write_ppm_ui_texts[] = {"name", "category", "help", 0}; /** @brief oyra oyCMMapi4_s::ui implementation * * The UI for filter write ppm. 
* * @version Oyranos: 0.1.10 * @since 2009/09/09 (Oyranos: 0.1.10) * @date 2009/12/22 */ oyCMMui_s_ oyra_api4_image_write_ppm_ui = { oyOBJECT_CMM_DATA_TYPES_S, /**< oyOBJECT_e type; */ 0,0,0, /* unused oyStruct_s fields; keep to zero */ CMM_VERSION, /**< int32_t version[3] */ CMM_API_VERSION, /**< int32_t module_api[3] */ oyraFilter_ImageOutputPPMValidateOptions, /* oyCMMFilter_ValidateOptions_f */ oyraWidgetEvent, /* oyWidgetEvent_f */ "Files/Write PPM", /* category */ ppm_write_extra_options, /* const char * options */ oyraPPMwriteUiGet, /* oyCMMuiGet_f oyCMMuiGet */ oyraApi4ImageWriteUiGetText, /* oyCMMGetText_f getText */ oyra_api4_image_write_ppm_ui_texts, /* const char ** texts */ (oyCMMapiFilter_s*)&oyra_api4_image_write_ppm /* oyCMMapiFilter_s*parent */ }; /** @brief oyra oyCMMapi4_s implementation * * A filter writing a PPM image. * * @par Options: * - "filename" - the file name to write to * * @version Oyranos: 0.1.8 * @since 2008/10/07 (Oyranos: 0.1.8) * @date 2008/10/07 */ oyCMMapi4_s_ oyra_api4_image_write_ppm = { oyOBJECT_CMM_API4_S, /* oyStruct_s::type oyOBJECT_CMM_API4_S */ 0,0,0, /* unused oyStruct_s fileds; keep to zero */ (oyCMMapi_s*) & oyra_api7_image_write_ppm, /* oyCMMapi_s * next */ oyraCMMInit, /* oyCMMInit_f */ oyraCMMMessageFuncSet, /* oyCMMMessageFuncSet_f */ /* registration */ OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD "/file_write.write_ppm._CPU._" CMM_NICK, CMM_VERSION, /* int32_t version[3] */ CMM_API_VERSION, /**< int32_t module_api[3] */ 0, /* id_; keep empty */ 0, /* api5_; keep empty */ 0, /* runtime_context */ (oyCMMFilterNode_ContextToMem_f)oyFilterNode_TextToInfo_, /* oyCMMFilterNode_ContextToMem_f */ 0, /* oyCMMFilterNode_GetText_f oyCMMFilterNode_GetText */ {0}, /* char context_type[8] */ (oyCMMui_s_*)&oyra_api4_image_write_ppm_ui /**< oyCMMui_s *ui */ }; char * oyra_api7_image_output_ppm_properties[] = { "file=write", /* file read|write */ "image=pixel", /* image type, pixel/vector/font */ "layers=1", /* 
layer count, one for plain images */ "icc=0", /* image type ICC profile support */ "ext=ppm,pnm,pbm,pgm,pfm", /* supported extensions */ 0 }; /** @brief oyra oyCMMapi7_s implementation * * A filter writing a PPM image. * * @par Options: * - "filename" - the file name to write to * * @version Oyranos: 0.1.8 * @since 2008/10/07 (Oyranos: 0.1.8) * @date 2008/10/07 */ oyCMMapi7_s_ oyra_api7_image_write_ppm = { oyOBJECT_CMM_API7_S, /* oyStruct_s::type oyOBJECT_CMM_API7_S */ 0,0,0, /* unused oyStruct_s fileds; keep to zero */ (oyCMMapi_s*) & oyra_api4_image_input_ppm, /* oyCMMapi_s * next */ oyraCMMInit, /* oyCMMInit_f */ oyraCMMMessageFuncSet, /* oyCMMMessageFuncSet_f */ /* registration */ OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD "/file_write.write_ppm._CPU._" CMM_NICK, CMM_VERSION, /* int32_t version[3] */ CMM_API_VERSION, /**< int32_t module_api[3] */ 0, /* id_; keep empty */ 0, /* api5_; keep empty */ 0, /* runtime_context */ oyraFilterPlug_ImageOutputPPMWrite, /* oyCMMFilterPlug_Run_f */ {0}, /* char data_type[8] */ (oyConnector_s**) oyra_imageOutputPPM_connectors_plug, /* plugs */ 1, /* plugs_n */ 0, /* plugs_last_add */ (oyConnector_s**) oyra_imageOutputPPM_connectors_socket, /* sockets */ 1, /* sockets_n */ 0, /* sockets_last_add */ oyra_api7_image_output_ppm_properties /* char * properties */ }; /* ---------------------------------------------------------------------------*/ oyOptions_s* oyraFilter_ImageInputPPMValidateOptions ( oyFilterCore_s * filter, oyOptions_s * validate, int statical OY_UNUSED, uint32_t * result ) { uint32_t error = !filter; if(!error) error = !oyOptions_FindString( validate, "filename", 0 ); *result = error; return 0; } int wread ( unsigned char* data, size_t pos, size_t max, size_t *start, size_t *end ) { int end_found = 0; if( max <= 1 ) return 0; while(pos < max && isspace( data[pos] )) ++pos; *start = pos; while(pos < max && !end_found) { if( isspace( data[pos] ) ) { end_found = 1; break; } else ++pos; } *end = 
pos; return end_found; } /** @func oyraFilterPlug_ImageInputPPMRun * @brief implement oyCMMFilter_GetNext_f() * * @version Oyranos: 0.1.10 * @since 2009/02/18 (Oyranos: 0.1.10) * @date 2009/02/18 */ int oyraFilterPlug_ImageInputPPMRun ( oyFilterPlug_s * requestor_plug, oyPixelAccess_s * ticket ) { oyFilterSocket_s * socket = 0; oyStruct_s * socket_data = 0; oyFilterNode_s * node = 0; oyOptions_s * tags = 0; int error = 0; const char * filename = 0; FILE * fp = 0; oyDATATYPE_e data_type = oyUINT8; oyPROFILE_e profile_type = oyEDITING_RGB; oyProfile_s * prof = 0; oyImage_s * image_in = 0, * output_image = 0; oyPixel_t pixel_type = 0; int fsize = 0; size_t fpos = 0; uint8_t * data = 0, * buf = 0; size_t mem_n = 0; /* needed memory in bytes */ int info_good = 1; int32_t icc_profile_flags = 0; int type = 0; /* PNM type */ int width = 0; int height = 0; int spp = 0; /* samples per pixel */ int byteps = 1; /* byte per sample */ double maxval = 0; size_t start, end; if(requestor_plug->type_ == oyOBJECT_FILTER_PLUG_S) { socket = oyFilterPlug_GetSocket( requestor_plug ); socket_data = oyFilterSocket_GetData( socket ); } /* passing through the data reading */ if(requestor_plug->type_ == oyOBJECT_FILTER_PLUG_S && socket_data) { error = oyraFilterPlug_ImageRootRun( requestor_plug, ticket ); return error; } else if(requestor_plug->type_ == oyOBJECT_FILTER_SOCKET_S) { /* To open the a image here seems not so straight forward. * Still the plug-in should be prepared to initialise the image data before * normal processing occurs. 
*/ socket = oyFilterSocket_Copy( (oyFilterSocket_s*)requestor_plug, 0 ); requestor_plug = 0; } node = oyFilterSocket_GetNode( socket ); if(error <= 0) { oyOptions_s * opts = oyFilterNode_GetOptions( node ,0 ); filename = oyOptions_FindString( opts, "filename", 0 ); oyOptions_FindInt( opts, "icc_profile_flags", 0, &icc_profile_flags ); oyOptions_Release( &opts ); } if(filename) fp = fopen( filename, "rm" ); if(!fp) { oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ " could not open: %s", OY_DBG_ARGS_, oyNoEmptyString_m_( filename ) ); return 1; } fseek(fp,0L,SEEK_END); fsize = ftell(fp); rewind(fp); oyAllocHelper_m_( data, uint8_t, fsize, 0, fclose(fp); return 1); fpos = fread( data, sizeof(uint8_t), fsize, fp ); if( fpos < (size_t)fsize ) { oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ " could not read: %s %d %d", OY_DBG_ARGS_, oyNoEmptyString_m_( filename ), fsize, (int)fpos ); oyFree_m_( data ) fclose (fp); return FALSE; } fpos = 0; fclose (fp); fp = NULL; /* parse Infos */ if(data[fpos] == 'P') { if(isdigit(data[++fpos])) { char tmp[2] = {0, 0}; tmp[0] = data[fpos]; type = atoi(tmp); } else if (!isspace(data[fpos])) { if(data[fpos] == 'F') /* PFM rgb */ type = -6; else if (data[fpos] == 'f') /* PFM gray */ type = -5; else if(data[fpos] == 'H') /* PFM Half rgb */ type = -9; else if (data[fpos] == 'h') /* PFM Half gray */ type = -8; else info_good = 0; } else info_good = 0; } fpos++; /* parse variables */ { int in_c = 0; /* within comment */ int v_read = 0; /* number of variables allready read */ int v_need = 3; /* number of needed variable; start with three */ int l_end = 0; /* line end position */ int l_pos = 0; /* line position */ int l_rdg = 1; /* line reading */ char * tupltype = NULL; /* ICC profile internal color space */ int tupl = 0; if(type == 1 || type == 4) v_need = 2; if(type == 7) /* pam */ v_need = 12; while(v_read < v_need && info_good) { l_pos = l_end = fpos; l_rdg = 1; /* read line */ while(fpos < (size_t)fsize && l_rdg) { 
if(data[fpos-1] == '\n' && data[fpos] == '#') { in_c = 1; l_end = fpos-1; } else if(data[fpos] == 10 || data[fpos] == 13) { /* line break */ l_rdg = 0; } else if(data[fpos] != 0) { if(!in_c) ++l_end; } else { l_rdg = 0; } if(!l_rdg) { in_c = 0; } ++fpos; } /* lockup color space */ if(fpos - l_pos > 0) { if(fpos - l_pos >= 14 && memcmp(&data[l_pos],"# COLORSPACE: ", 14) == 0) { char * t = oyAllocateFunc_(fpos - l_pos + 1); if(t) { memcpy( t, &data[l_pos+14], fpos - l_pos - 15 ); t[fpos - l_pos - 15] = 0; prof = oyProfile_FromName(t, icc_profile_flags, NULL); if(prof) { if(oy_debug) oyra_msg( oyMSG_DBG, (oyStruct_s*)node, OY_DBG_FORMAT_ "found ICC: %s", OY_DBG_ARGS_, oyNoEmptyString_m_( t ) ); } else oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ "could not find ICC: %s", OY_DBG_ARGS_, oyNoEmptyString_m_( t ) ); oyDeAllocateFunc_(t); } } } if(!prof && getenv("COLORSPACE")) { const char * t = getenv("COLORSPACE"); prof = oyProfile_FromName(t, icc_profile_flags, NULL); if(!prof) oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ "could not find \"COLORSPACE\" from environment variable: %s", OY_DBG_ARGS_, oyNoEmptyString_m_( t ) ); } /* parse line */ while(info_good && v_read < v_need && l_pos < l_end) { if( info_good ) { double var = -2; char var_s[64]; int l = 0; wread ( data, l_pos, l_end, &start, &end ); l = end - start; if ( l < 63 ) { memcpy(var_s, &data[start], l); var_s[l] = 0; oyStringToDouble(var_s, &var); # ifdef DEBUG_ fprintf(stderr, "var = \"%s\" %d\n",var_s, l); # endif } l_pos = end + 1; if(type == 7) { if(height == -1) height = (int)var; if(width == -1) width = (int)var; if(spp == -1) spp = (int)var; if(maxval == -0.5) maxval = var; if(tupl == -1) { tupl = 1; tupltype = oyStringCopy(var_s, oyAllocateFunc_); } if(strcmp(var_s, "HEIGHT") == 0) height = -1; /* expecting the next token is the val */ if(strcmp(var_s, "WIDTH") == 0) width = -1; if(strcmp(var_s, "DEPTH") == 0) spp = -1; if(strcmp(var_s, "MAXVAL") == 0) maxval = -0.5; 
if(strcmp(var_s, "TUPLTYPE") == 0) tupl = -1; if(strcmp(var_s, "ENDHDR") == 0) v_need = v_read; } else { if (!var) info_good = 0; if(v_read == 0) width = (int)var; else if(v_read == 1) height = (int)var; else if(v_read == 2) maxval = var; } ++v_read; } } } if(tupltype && !prof) { const char * colorspace = "rgbi"; if(strcmp(tupltype, "GRAY") == 0 || strcmp(tupltype, "GRAY_ALPHA") == 0) colorspace = "grayi"; if(strcmp(tupltype, "RGB") == 0 || strcmp(tupltype, "RGB_ALPHA") == 0) colorspace = "rgbi"; if(strcmp(tupltype, "CMYK") == 0 || strcmp(tupltype, "CMYK_ALPHA") == 0) colorspace = "cmyki"; prof = oyProfile_FromName( colorspace, icc_profile_flags, NULL ); if(!prof) oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ "could not find \"COLORSPACE\" from environment variable: %s", OY_DBG_ARGS_, oyNoEmptyString_m_( tupltype ) ); oyFree_m_(tupltype) } } if(strstr(strrchr(filename, '.')+1, "raw")) { const char * t; info_good = 1; t = getenv("RAW_WIDTH"); if(t) width = atoi(t); else info_good = 0; t = getenv("RAW_HEIGHT"); if(t) height = atoi(t); else info_good = 0; t = getenv("RAW_TYPE"); if(t) type = atoi(t); else info_good = 0; fpos = 0; t = getenv("RAW_MAXVAL"); if(t) maxval = atoi(t); else info_good = 0; if(info_good == 0) oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ "need RAW_WIDTH, RAW_HEIGHT, RAW_TYPE and RAW_MAXVAL environment variables", OY_DBG_ARGS_ ); } if(info_good) switch(type) { case 1: case 4: data_type = oyUINT8; spp = 1; info_good = 0; break; case 2: case 5: if(maxval <= 255) { data_type = oyUINT8; byteps = 1; } else if (maxval <= 65535) { data_type = oyUINT16; byteps = 2; } spp = 1; break; case 3: case 6: if(maxval <= 255) { data_type = oyUINT8; byteps = 1; } else if (maxval <= 65535) { data_type = oyUINT16; byteps = 2; } spp = 3; break; case -5: data_type = oyFLOAT; byteps = 4; spp = 1; break; case -6: byteps = 4; spp = 3; data_type = oyFLOAT; break; case -8: data_type = oyHALF; byteps = 2; spp = 1; break; case -9: byteps = 2; spp = 3; 
data_type = oyHALF; break; case 7: /* pam */ if (maxval == 1.0 || maxval == -1.0) { byteps = 4; data_type = oyFLOAT; } else if(maxval <= 255) { byteps = 1; data_type = oyUINT8; } else if (maxval <= 65535) { byteps = 2; data_type = oyUINT16; } break; default: info_good = 0; } switch(spp) { case 1: profile_type = oyASSUMED_GRAY; break; case 2: profile_type = oyASSUMED_GRAY; break; case 3: profile_type = oyASSUMED_RGB; break; case 4: profile_type = oyASSUMED_RGB; break; } if( !info_good ) { oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ "failed to get info of %s", OY_DBG_ARGS_, oyNoEmptyString_m_( filename )); oyFree_m_( data ) return FALSE; } /* check if the file can hold the expected data (for raw only) */ mem_n = width*height*byteps*spp; if(type == 5 || type == 6 || type == -5 || type == -6 || type == -8 || type == -9 || type == 7) { if (mem_n > fsize-fpos) { oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ "\n storage size of %s is too small: %d", OY_DBG_ARGS_, oyNoEmptyString_m_( filename ), (int)mem_n-fsize-fpos ); oyFree_m_( data ) return FALSE; } } else { if (type == 2 || type == 3) { oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ "\n %s contains ascii data, which are not handled by this pnm reader", OY_DBG_ARGS_, oyNoEmptyString_m_( filename )); } else if (type == 1 || type == 4) { oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ "\n %s contains bitmap data, which are not handled by this pnm reader", OY_DBG_ARGS_, oyNoEmptyString_m_( filename ) ); } oyFree_m_( data ) return FALSE; } oyAllocHelper_m_( buf, uint8_t, mem_n, 0, oyFree_m_( data ); return 1); DBG_NUM2_S("allocate image data: 0x%x size: %d ", (int)(intptr_t) buf, mem_n ); /* the following code is almost completely taken from ku.b's ppm CP plug-in */ { int h, j_h = 0, p, n_samples = 0, n_bytes = 0; int byte_swap = 0; unsigned char *d_8 = 0; unsigned char *src = &data[fpos]; uint16_t *d_16; half *d_f16; float *d_f; int adapt = 0; if(oyBigEndian()) { if( maxval < 0 && 
(byteps == 2 || byteps == 4) ) byte_swap = 1; } else { if( maxval > 0 && (byteps == 2 || byteps == 4) ) byte_swap = 1; } maxval = fabs(maxval); for(h = 0; h < height; ++h) { n_samples = 1 * width * spp; n_bytes = n_samples * byteps; d_8 = buf; d_16 = (uint16_t*)buf; d_f16= (half*)buf; d_f = (float*)buf; /* TODO 1 bit raw and ascii */ if (type == 1 || type == 4) { /* TODO ascii */ } else if (type == 2 || type == 3) { /* raw and floats */ } else if (type == 5 || type == 6 || type == -5 || type == -6 || type == -8 || type == -9 || type == 7 ) { if(byteps == 1) { d_8 = &src[ h * width * spp * byteps ]; } else if(byteps == 2) { d_f16 = d_16 = (uint16_t*)& src[ h * width * spp * byteps ]; } else if(byteps == 4) { d_f = (float*)&src[ h * width * spp * byteps ]; } memcpy (&buf[ h * width * spp * byteps ], &src[ (j_h + h) * width * spp * byteps ], 1 * width * spp * byteps); } /* normalise and byteswap */ if( byte_swap ) { unsigned char *c_buf = &buf[ h * width * spp * byteps ]; char tmp; adapt |= 1; if (byteps == 2) { /* 16 bit */ #pragma omp parallel for private(tmp) for (p = 0; p < n_bytes; p += 2) { tmp = c_buf[p]; c_buf[p] = c_buf[p+1]; c_buf[p+1] = tmp; } } else if (byteps == 4) { /* float */ #pragma omp parallel for private(tmp) for (p = 0; p < n_bytes; p += 4) { tmp = c_buf[p]; c_buf[p] = c_buf[p+3]; c_buf[p+3] = tmp; tmp = c_buf[p+1]; c_buf[p+1] = c_buf[p+2]; c_buf[p+2] = tmp; } } } if (byteps == 1 && maxval < 255) { /* 8 bit */ adapt |= 2; #pragma omp parallel for for (p = 0; p < n_samples; ++p) d_8[p] = (d_8[p] * 255) / maxval; } else if (byteps == 2 && maxval != 1.0 && (type == -8 || type == -9)) { /* half float */ adapt |= 2; #pragma omp parallel for for (p = 0; p < n_samples; ++p) d_f16[p] = d_f16[p] * maxval; } else if (byteps == 2 && maxval < 65535 && type != -8 && type != -9) {/* 16 bit */ adapt |= 2; #pragma omp parallel for for (p = 0; p < n_samples; ++p) d_16 [p] = (d_16[p] * 65535) / maxval; } else if (byteps == 4 && maxval != 1.0) { /* float */ adapt |= 
2; #pragma omp parallel for for (p = 0; p < n_samples; ++p) d_f[p] = d_f[p] * maxval; } } if((adapt & 1) && oy_debug) oyra_msg( oyMSG_DBG, (oyStruct_s*)node, OY_DBG_FORMAT_ "going to swap bytes %d %d", OY_DBG_ARGS_, byteps, n_bytes ); if((adapt & 2) && oy_debug) oyra_msg( oyMSG_DBG, (oyStruct_s*)node, OY_DBG_FORMAT_ "going to adapt intensity %g %d", OY_DBG_ARGS_, maxval, n_samples ); } pixel_type = oyChannels_m(spp) | oyDataType_m(data_type); if(!prof) prof = oyProfile_FromStd( profile_type, icc_profile_flags, 0 ); image_in = oyImage_Create( width, height, buf, pixel_type, prof, 0 ); if (!image_in) { oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ "PNM can't create a new image\n%dx%d %d", OY_DBG_ARGS_, width, height, pixel_type ); oyFree_m_ (data) return FALSE; } tags = oyImage_GetTags( image_in ); error = oyOptions_SetFromString( &tags, "//" OY_TYPE_STD "/file_read.input_ppm" "/filename", filename, OY_CREATE_NEW ); oyOptions_Release( &tags ); if(error <= 0) { oyFilterSocket_SetData( socket, (oyStruct_s*)image_in ); } if(ticket) output_image = oyPixelAccess_GetOutputImage( ticket ); if(ticket && output_image && oyImage_GetWidth( output_image ) == 0 && oyImage_GetHeight( output_image ) == 0) { oyImage_SetCritical( output_image, oyImage_GetPixelLayout( image_in, oyLAYOUT ), 0,0, oyImage_GetWidth( image_in ), oyImage_GetHeight( image_in ) ); } oyImage_Release( &image_in ); oyImage_Release( &output_image ); oyFilterNode_Release( &node ); oyFilterSocket_Release( &socket ); oyFree_m_ (data) /* return an error to cause the graph to retry */ return 1; } const char ppm_read_extra_options[] = { "\n\ <" OY_TOP_SHARED ">\n\ <" OY_DOMAIN_INTERNAL ">\n\ <" OY_TYPE_STD ">\n\ <" "file_read" ">\n\ <filename></filename>\n\ </" "file_read" ">\n\ </" OY_TYPE_STD ">\n\ </" OY_DOMAIN_INTERNAL ">\n\ </" OY_TOP_SHARED ">\n" }; int oyraPPMreadUiGet ( oyCMMapiFilter_s * module OY_UNUSED, oyOptions_s * opts OY_UNUSED, int flags OY_UNUSED, char ** xforms_layout, oyAlloc_f allocateFunc 
) { char * text = (char*)allocateFunc(5); text[0] = 0; *xforms_layout = text; return 0; } oyConnectorImaging_s_ oyra_imageInputPPM_connector = { oyOBJECT_CONNECTOR_IMAGING_S,0,0, (oyObject_s)&oy_connector_imaging_static_object, oyCMMgetImageConnectorSocketText, /* getText */ oy_image_connector_texts, /* texts */ "//" OY_TYPE_STD "/image.data", /* connector_type */ oyFilterSocket_MatchImagingPlug, /* filterSocket_MatchPlug */ 0, /* is_plug == oyFilterPlug_s */ oyra_image_ppm_data_types, 4, /* data_types_n; elements in data_types array */ -1, /* max_color_offset */ 1, /* min_channels_count; */ 4, /* max_channels_count; */ 1, /* min_color_count; */ 4, /* max_color_count; */ 0, /* can_planar; can read separated channels */ 1, /* can_interwoven; can read continuous channels */ 0, /* can_swap; can swap color channels (BGR)*/ 0, /* can_swap_bytes; non host byte order */ 0, /* can_revert; revert 1 -> 0 and 0 -> 1 */ 1, /* can_premultiplied_alpha; */ 1, /* can_nonpremultiplied_alpha; */ 0, /* can_subpixel; understand subpixel order */ 0, /* oyCHANNELTYPE_e * channel_types; */ 0, /* count in channel_types */ 1, /* id; relative to oyFilter_s, e.g. 1 */ 0 /* is_mandatory; mandatory flag */ }; oyConnectorImaging_s_ * oyra_imageInputPPM_connectors[2] = { &oyra_imageInputPPM_connector, 0 }; /** * This function implements oyCMMGetText_f. 
* * @version Oyranos: 0.1.10 * @since 2009/12/22 (Oyranos: 0.1.10) * @date 2009/12/22 */ const char * oyraApi4ImageInputUiGetText ( const char * select, oyNAME_e type, oyStruct_s * context OY_UNUSED ) { static char * category = 0; if(strcmp(select,"name") == 0) { if(type == oyNAME_NICK) return "input_ppm"; else if(type == oyNAME_NAME) return _("Image[input_ppm]"); else return _("Input PPM Image Filter Object"); } else if(strcmp(select,"category") == 0) { if(!category) { STRING_ADD( category, _("Files") ); STRING_ADD( category, _("/") ); STRING_ADD( category, _("Read PPM") ); } if(type == oyNAME_NICK) return "category"; else if(type == oyNAME_NAME) return category; else return category; } else if(strcmp(select,"help") == 0) { if(type == oyNAME_NICK) return "help"; else if(type == oyNAME_NAME) return _("Option \"filename\", a valid filename of a existing PPM image"); else return _("The Option \"filename\" should contain a valid filename to read the ppm data from. If the file does not exist, a error will occure.\nThe oyEDITING_RGB ICC profile is attached."); } return 0; } const char * oyra_api4_image_input_ppm_ui_texts[] = {"name", "category", "help", 0}; /** @brief oyra oyCMMapi4_s::ui implementation * * The UI for filter input ppm. 
* * @version Oyranos: 0.1.10 * @since 2009/09/09 (Oyranos: 0.1.10) * @date 2009/09/09 */ oyCMMui_s_ oyra_api4_ui_image_input_ppm = { oyOBJECT_CMM_DATA_TYPES_S, /**< oyOBJECT_e type; */ 0,0,0, /* unused oyStruct_s fields; keep to zero */ CMM_VERSION, /**< int32_t version[3] */ CMM_API_VERSION, /**< int32_t module_api[3] */ oyraFilter_ImageInputPPMValidateOptions, /* oyCMMFilter_ValidateOptions_f */ oyraWidgetEvent, /* oyWidgetEvent_f */ "Files/Read PPM", /* category */ ppm_read_extra_options, /* const char * options */ oyraPPMreadUiGet, /* oyCMMuiGet_f oyCMMuiGet */ oyraApi4ImageInputUiGetText, /* oyCMMGetText_f getText */ oyra_api4_image_input_ppm_ui_texts, /* const char ** texts */ (oyCMMapiFilter_s*)&oyra_api4_image_input_ppm /* oyCMMapiFilter_s*parent */ }; /** @brief oyra oyCMMapi4_s implementation * * A filter for reading a PPM image. * * @par Options: * - "filename" - the file name to read from * * @version Oyranos: 0.1.10 * @since 2009/02/18 (Oyranos: 0.1.10) * @date 2009/02/18 */ oyCMMapi4_s_ oyra_api4_image_input_ppm = { oyOBJECT_CMM_API4_S, /* oyStruct_s::type oyOBJECT_CMM_API4_S */ 0,0,0, /* unused oyStruct_s fileds; keep to zero */ (oyCMMapi_s*) & oyra_api7_image_input_ppm, /* oyCMMapi_s * next */ oyraCMMInit, /* oyCMMInit_f */ oyraCMMMessageFuncSet, /* oyCMMMessageFuncSet_f */ /* registration */ OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD "/file_read.input_ppm._CPU._" CMM_NICK, CMM_VERSION, /* int32_t version[3] */ CMM_API_VERSION, /**< int32_t module_api[3] */ 0, /* id_; keep empty */ 0, /* api5_; keep empty */ 0, /* runtime_context */ (oyCMMFilterNode_ContextToMem_f)oyFilterNode_TextToInfo_, /* oyCMMFilterNode_ContextToMem_f */ 0, /* oyCMMFilterNode_GetText_f oyCMMFilterNode_GetText */ {0}, /* char context_type[8] */ (oyCMMui_s_*)&oyra_api4_ui_image_input_ppm /**< oyCMMui_s *ui */ }; char * oyra_api7_image_input_ppm_properties[] = { "file=read", /* file read|write */ "image=pixel", /* image type, pixel/vector/font */ "layers=1", /* 
layer count, one for plain images */ "icc=1", /* image type ICC profile support */ "ext=pam,ppm,pnm,pbm,pgm,pfm,raw", /* supported extensions */ 0 }; /** @brief oyra oyCMMapi7_s implementation * * A filter reading a PPM image. * * @par Options: * - "filename" - the file name to read from * * @version Oyranos: 0.1.10 * @since 2009/02/18 (Oyranos: 0.1.10) * @date 2009/02/18 */ oyCMMapi7_s_ oyra_api7_image_input_ppm = { oyOBJECT_CMM_API7_S, /* oyStruct_s::type oyOBJECT_CMM_API7_S */ 0,0,0, /* unused oyStruct_s fileds; keep to zero */ (oyCMMapi_s*) & oyra_api4_image_load, /* oyCMMapi_s * next */ oyraCMMInit, /* oyCMMInit_f */ oyraCMMMessageFuncSet, /* oyCMMMessageFuncSet_f */ /* registration */ OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD "/file_read.input_ppm._CPU._" CMM_NICK, CMM_VERSION, /* int32_t version[3] */ CMM_API_VERSION, /**< int32_t module_api[3] */ 0, /* id_; keep empty */ 0, /* api5_; keep empty */ 0, /* runtime_context */ oyraFilterPlug_ImageInputPPMRun, /* oyCMMFilterPlug_Run_f */ {0}, /* char data_type[8] */ 0, /* plugs */ 0, /* plugs_n */ 0, /* plugs_last_add */ (oyConnector_s**) oyra_imageInputPPM_connectors, /* sockets */ 1, /* sockets_n */ 0, /* sockets_last_add */ oyra_api7_image_input_ppm_properties /* char ** properties */ };
cfd.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>

#include "arraymalloc.h"
#include "boundary.h"
#include "jacobi.h"
#include "cfdio.h"

/*
 * CFD benchmark driver: iteratively solves for the stream function psi
 * (and, for viscous flow, the vorticity zet) on an (m+2)x(n+2) grid using
 * Jacobi iteration, parallelised with OpenMP inside jacobistep/copy loops.
 *
 * Usage: cfd <scale> <numiter> [reynolds]
 *   - <scale> multiplies the base grid/obstacle sizes
 *   - [reynolds] switches from irrotational to viscous (vorticity) flow
 */
int main(int argc, char **argv)
{
  int printfreq=1000; //output frequency
  double error, bnorm;
  double tolerance=0.0; //tolerance for convergence. <=0 means do not check

  //main arrays
  double **psi, **zet;
  //temporary versions of main arrays
  double **psitmp, **zettmp;

  //command line arguments
  int scalefactor, numiter;
  double re; // Reynold's number - must be less than 3.7

  //simulation sizes (scaled below by scalefactor)
  int bbase=10;
  int hbase=15;
  int wbase=5;
  int mbase=32;
  int nbase=32;

  int irrotational = 1, checkerr = 0;

  int m,n,b,h,w;
  int iter;
  int i,j;

  // OpenMP variables
  int nthread;

  double tstart, tstop, ttot, titer;

  //do we stop because of tolerance?
  if (tolerance > 0) {checkerr=1;}

  //check command line parameters and parse them
  if (argc <3|| argc >4)
    {
      printf("Usage: cfd <scale> <numiter> [reynolds]\n");
      return 0;
    }

  scalefactor=atoi(argv[1]);
  numiter=atoi(argv[2]);

  // a third argument selects viscous flow with the given Reynolds number
  if (argc == 4)
    {
      re=atof(argv[3]);
      irrotational=0;
    }
  else
    {
      re=-1.0; // sentinel: unused in the irrotational case
    }

  if(!checkerr)
    {
      printf("Scale Factor = %i, iterations = %i\n",scalefactor, numiter);
    }
  else
    {
      printf("Scale Factor = %i, iterations = %i, tolerance= %g\n",scalefactor,numiter,tolerance);
    }

  if (irrotational)
    {
      printf("Irrotational flow\n");
    }
  else
    {
      printf("Reynolds number = %f\n",re);
    }

  //Calculate b, h & w (obstacle geometry) and m & n (grid size)
  b = bbase*scalefactor;
  h = hbase*scalefactor;
  w = wbase*scalefactor;
  m = mbase*scalefactor;
  n = nbase*scalefactor;

  // keep the effective Reynolds number grid-independent
  re = re / (double)scalefactor;

  nthread = omp_get_max_threads();
  printf("Running CFD on %d x %d grid using %d thread(s)\n",m,n,nthread);

  //allocate arrays (interior m x n plus one halo cell on each side)
  psi    = (double **) arraymalloc2d(m+2,n+2,sizeof(double));
  psitmp = (double **) arraymalloc2d(m+2,n+2,sizeof(double));

  //zero the psi array
  for (i=0;i<m+2;i++)
    {
      for(j=0;j<n+2;j++)
        {
          psi[i][j]=0.0;
        }
    }

  if (!irrotational)
    {
      //allocate arrays for vorticity (viscous case only)
      zet =   (double **) arraymalloc2d(m+2,n+2,sizeof(double));
      zettmp =(double **) arraymalloc2d(m+2,n+2,sizeof(double));

      //zero the zeta array
      for (i=0;i<m+2;i++)
        {
          for(j=0;j<n+2;j++)
            {
              zet[i][j]=0.0;
            }
        }
    }

  //set the psi boundary conditions
  boundarypsi(psi,m,n,b,h,w);

  //compute normalisation factor for error (norm of the boundary data)
  bnorm=0.0;

  for (i=0;i<m+2;i++)
    {
      for (j=0;j<n+2;j++)
        {
          bnorm += psi[i][j]*psi[i][j];
        }
    }

  if (!irrotational)
    {
      //update zeta BCs that depend on psi
      boundaryzet(zet,psi,m,n);

      //update normalisation
      for (i=0;i<m+2;i++)
        {
          for (j=0;j<n+2;j++)
            {
              bnorm += zet[i][j]*zet[i][j];
            }
        }
    }

  bnorm=sqrt(bnorm);

  //begin iterative Jacobi loop
  printf("\nStarting main loop...\n\n");

  tstart=gettime();

  for(iter=1;iter<=numiter;iter++)
    {
      //calculate psi for next iteration
      if (irrotational)
        {
          jacobistep(psitmp,psi,m,n);
        }
      else
        {
          jacobistepvort(zettmp,psitmp,zet,psi,m,n,re);
        }

      //calculate current error if required (always done on the last iteration
      //so the final report below has a valid value)
      if (checkerr || iter == numiter)
        {
          error = deltasq(psitmp,psi,m,n);

          if(!irrotational)
            {
              error += deltasq(zettmp,zet,m,n);
            }

          error=sqrt(error);
          error=error/bnorm;
        }

      //copy back the Jacobi update (interior points only; halo untouched)
      #pragma omp parallel for default(none) private(i,j) shared(psi,psitmp,m,n)
      for(i=1;i<=m;i++)
        {
          for(j=1;j<=n;j++)
            {
              psi[i][j]=psitmp[i][j];
            }
        }

      if (!irrotational)
        {
          #pragma omp parallel for default(none) private(i,j) shared(zet,zettmp,m,n)
          for(i=1;i<=m;i++)
            {
              for(j=1;j<=n;j++)
                {
                  zet[i][j]=zettmp[i][j];
                }
            }
        }

      if (!irrotational)
        {
          //update zeta BCs that depend on psi
          boundaryzet(zet,psi,m,n);
        }

      //quit early if we have reached required tolerance
      if (checkerr)
        {
          if (error < tolerance)
            {
              printf("Converged on iteration %d\n",iter);
              break;
            }
        }

      //print loop information
      if(iter%printfreq == 0)
        {
          if (!checkerr)
            {
              printf("Completed iteration %d\n",iter);
            }
          else
            {
              printf("Completed iteration %d, error = %g\n",iter,error);
            }
        }
    }

  // if the loop ran to completion, iter overshoots by one
  if (iter > numiter) iter=numiter;

  tstop=gettime();

  ttot=tstop-tstart;
  titer=ttot/(double)iter;

  //print out some stats
  printf("\n... finished\n");
  printf("After %d iterations, the error is %g\n",iter,error);
  printf("Time for %d iterations was %g seconds\n",iter,ttot);
  printf("Each iteration took %g seconds\n",titer);

  //output results
  writedatafiles(psi,m,n, scalefactor);
  writeplotfile(m,n,scalefactor);

  //free un-needed arrays
  free(psi);
  free(psitmp);

  if (!irrotational)
    {
      free(zet);
      free(zettmp);
    }

  printf("... finished\n");

  return 0;
}
pcg.h
/*
 * PCG Random Number Generation for C.
 *
 * Copyright 2014 Melissa O'Neill <oneill@pcg-random.org>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * For additional information about the PCG random number generation scheme,
 * including its license and other licensing options, visit
 *
 *     http://www.pcg-random.org
 */

/*
 * This code includes openMP pragmas for offloading, added by S. Akaslompolo.
 *
 * This code is derived from the full C implementation, which is in turn
 * derived from the canonical C++ PCG implementation. The C++ version
 * has many additional features and is preferable if you can use C++ in
 * your project.
 */

#ifndef PCG_BASIC_H_INCLUDED
#define PCG_BASIC_H_INCLUDED 1

#include <inttypes.h>

#if __cplusplus
extern "C" {
#endif

/* PCG32 generator state.  Internals are *Private*. */
struct pcg_state_setseq_64 {
    uint64_t state;             // RNG state.  All values are possible.
    uint64_t inc;               // Controls which RNG sequence (stream) is
                                // selected. Must *always* be odd.
};
typedef struct pcg_state_setseq_64 pcg32_random_t;

// If you *must* statically initialize it, here's one.
#define PCG32_INITIALIZER   { 0x853c49e6748fea9bULL, 0xda3e39cb94b95bdbULL }

/* Everything below is marked for OpenMP target offloading so the RNG can be
 * used inside device regions. */
#pragma omp declare target

// pcg32_srandom(initstate, initseq)
// pcg32_srandom_r(rng, initstate, initseq):
//     Seed the rng.  Specified in two parts, state initializer and a
//     sequence selection constant (a.k.a. stream id)
void pcg32_srandom(uint64_t initstate, uint64_t initseq);
void pcg32_srandom_r(pcg32_random_t* rng, uint64_t initstate, uint64_t initseq);

// pcg32_random()
// pcg32_random_r(rng)
//     Generate a uniformly distributed 32-bit random number
uint32_t pcg32_random(void);
uint32_t pcg32_random_r(pcg32_random_t* rng);

// pcg32_boundedrand(bound):
// pcg32_boundedrand_r(rng, bound):
//     Generate a uniformly distributed number, r, where 0 <= r < bound
uint32_t pcg32_boundedrand(uint32_t bound);
uint32_t pcg32_boundedrand_r(pcg32_random_t* rng, uint32_t bound);

#pragma omp end declare target

#if __cplusplus
}
#endif

#endif // PCG_BASIC_H_INCLUDED
exam-evaluator.c
/****************************************************************************** * FILE: evaluator_omp.c * USE: $ gcc -fopenmp -std=c99 -Wall exam-evaluator.c $ ./a.out [Solved quizes folder] [Number of Threads] * DESCRIPTION: * * AUTHOR: Ricardo Reais * LAST REVISED: 16/11/16 ******************************************************************************/ #define MASTER 0 #define _BSD_SOURCE #include <stdio.h> #include <stdbool.h> #include <math.h> #include <stdlib.h> #include <assert.h> #include <string.h> #include <limits.h> #include <sys/types.h> #include <dirent.h> #include <unistd.h> #include <errno.h> #include <omp.h> #include "strct.h" Evaluation get_evaluation(QSolved qs) { int n = qs.nQuestions; char **answerResults = strings_new(n); int nCorrectAnswers = 0; float correctAnswersPercentage = 0.0; int studentPoints = 0; int totalRating = 0; int studentResult = 0; for (int i = 0; i < n; i++) { if(strcmp(qs.solutions[i], qs.answers[i]) == 0) { answerResults[i] = "Correct "; nCorrectAnswers++; studentPoints += qs.ratings[i]; } else answerResults[i]="Incorrect "; totalRating+=qs.ratings[i]; } correctAnswersPercentage = (float)nCorrectAnswers / n * 100.0; studentResult = 20 * studentPoints / totalRating; return evaluation(answerResults, nCorrectAnswers, n, correctAnswersPercentage, studentPoints, totalRating, studentResult); } void test_quiz_read_write(char *fileName) { FILE *f = fopen(fileName, "r"); QSolved qs = qSolved_read(f); fclose(f); Evaluation e = get_evaluation(qs); QEvaluated qe = qEvaluated(qs.quizID, qs.studentID, qs.answers, e); qEvaluated_write_for_client(qe, stdout); } char* concat(const char *s1, const char *s2) { char *result = string_new(strlen(s1)+strlen(s2)+1);//+1 for the zero-terminator //in real code you would check for errors in malloc here strcpy(result, s1); strcat(result, s2); return result; } char *make_valid_dir(char *dirName) //Avoid error with directory not ending with / { int n = strlen(dirName); if(dirName[n-1] != '/') return 
concat(dirName, "/"); else return dirName; } void evalute(DIR *d, struct dirent * entry, char *strFinal, char *dirName) { strFinal = concat(dirName, entry->d_name); test_quiz_read_write(strFinal); free(strFinal); } void multiple_quiz_final_evaluation(char *dirName, int NTHREADS) { FILE *ls = popen("ls --quoting-style=escape -U Q1 | wc -l", "r"); int numOfFiles; fscanf(ls, "%d", &numOfFiles); struct dirent * entry; dirName = make_valid_dir(dirName); DIR *d = opendir( dirName ); char *strFinal; omp_set_num_threads(NTHREADS); if (d == 0) perror("opendir"); else #pragma omp parallel shared(d, dirName, strFinal, numOfFiles) default(none) { if(omp_get_thread_num() == MASTER) printf("NUMBER OF THREADS=%d\n", omp_get_num_threads()); #pragma omp for private(entry) schedule(dynamic, 1) for(int i = 0; i < numOfFiles+2; i++) if((entry = readdir(d))) { if (!strcmp (entry->d_name, ".") || !strcmp (entry->d_name, "..")); else evalute(d, entry, strFinal, dirName); } } printf("Finished!\n"); closedir(d); pclose(ls); } int main (int argc, char **argv) { if(argc > 2) multiple_quiz_final_evaluation(argv[1], atoi(argv[2])); else if (argc > 1) printf("Please insert number of threads!\n"); else if (argc > 0) printf("Please insert number of threads!\nPlease insert a folder with exams!\n"); return 0; }
csr.h
#ifndef __CSR_H
#define __CSR_H

#include "complex_ops.h"
#include "utils.h"
#include <iostream>
#include <iomanip>

// Debug helpers: print a scalar to stdout (complex wrappers print
// real then imag, imag right-aligned in a 20-char field).
void write(const float& a){ std::cout << a; }
void write(const double& a){ std::cout << a; }
void write(const npy_cfloat_wrapper& a){ std::cout << a.real << std::setw(20) << a.imag; }
void write(const npy_cdouble_wrapper& a){ std::cout << a.real << std::setw(20) << a.imag; }

// y = a*A*x (overwrite_y) or y += a*A*x, for contiguous x and y.
// A is CSR: Ap row pointers, Aj column indices, Ax values.
template<typename I, typename T1, typename T2,typename T3>
void csr_matvec_noomp_contig(const bool overwrite_y,
                             const I n_row,
                             const I Ap[],
                             const I Aj[],
                             const T1 Ax[],
                             const T2 a,
                             const T3 x[],
                                   T3 y[])
{
    // const T3 a_cast = T3(a);
    if(overwrite_y){
        for(I k = 0; k<n_row; k++){
            T3 sum = 0;
            for(I jj = Ap[k]; jj < Ap[k+1]; jj++){
                sum += Ax[jj] * x[Aj[jj]];
            }
            y[k] = a * sum;
        }
    }else{
        for(I k = 0; k<n_row; k++){
            T3 sum = 0;
            for(I jj = Ap[k]; jj < Ap[k+1]; jj++){
                sum += Ax[jj] * x[Aj[jj]];
            }
            y[k] += a * sum;
        }
    }
}

// Same as above, but x and y are accessed with element strides
// (strides are in elements, not bytes — converted by the dispatchers below).
template<typename I, typename T1, typename T2,typename T3>
void csr_matvec_noomp_strided(const bool overwrite_y,
                              const I n_row,
                              const I Ap[],
                              const I Aj[],
                              const T1 Ax[],
                              const T2 a,
                              const npy_intp x_stride,
                              const T3 x[],
                              const npy_intp y_stride,
                                    T3 y[])
{
    // const T3 a_cast = T3(a);
    if(overwrite_y){
        for(I k = 0; k<n_row; k++){
            T3 sum = 0;
            for(I jj = Ap[k]; jj < Ap[k+1]; jj++){
                sum += Ax[jj] * x[Aj[jj] * x_stride];
            }
            y[k * y_stride] = a * sum;
        }
    }else{
        for(I k = 0; k<n_row; k++){
            T3 sum = 0;
            for(I jj = Ap[k]; jj < Ap[k+1]; jj++){
                sum += Ax[jj] * x[Aj[jj] * x_stride];
            }
            y[k * y_stride] += a * sum;
        }
    }
}

// Y (+)= a*A*X for n_vecs right-hand-side vectors stored with arbitrary
// row/column strides.  Note: in the accumulate case y is NOT zeroed, so
// overwrite_y is implemented by explicitly clearing Y first.
template<typename I, typename T1, typename T2,typename T3>
void csr_matvecs_noomp_strided(const bool overwrite_y,
                               const I n_row,
                               const npy_intp n_vecs,
                               const I Ap[],
                               const I Aj[],
                               const T1 Ax[],
                               const T2 a,
                               const npy_intp x_stride_row,
                               const npy_intp x_stride_col,
                               const T3 x[],
                               const npy_intp y_stride_row,
                               const npy_intp y_stride_col,
                                     T3 y[])
{
    if(overwrite_y){
        for(npy_intp i = 0; i < n_row; i++){
            for(npy_intp j = 0; j < n_vecs; j++){
                y[i * y_stride_row + j * y_stride_col] = 0;
            }
        }
    }

    if(y_stride_col < y_stride_row){
        // row-major-ish Y: stream across the vectors for each matrix row
        for(I k = 0; k<n_row; k++){
            for(I jj = Ap[k]; jj < Ap[k+1]; jj++){
                const T3 ax = a * Ax[jj];
                const T3 * x_row = x + x_stride_row * Aj[jj];
                axpy_strided(n_vecs, ax, x_stride_col, x_row, y_stride_col, y);
            }
            y += y_stride_row;
        }
    }
    else{
        // column-major-ish Y: do one full matvec per vector
        for(I m=0;m<n_vecs; m++){
            for(I k = 0; k<n_row; k++){
                for(I jj = Ap[k]; jj < Ap[k+1]; jj++){
                    const npy_intp ii = x_stride_row * Aj[jj];
                    (*y) += (a * Ax[jj]) * x[ii];
                }
                y += y_stride_row;
            }
            x += x_stride_col;
        }
    }
}

#if defined(_OPENMP)

#include "csrmv_merge.h"
#include "openmp.h"

// OpenMP variant: delegates to csrmv_merge, which splits the nonzeros
// across threads; rco/vco are per-thread carry buffers for row splits.
// (Exact merge semantics live in csrmv_merge.h.)
template<typename I, typename T1, typename T2,typename T3>
inline void csr_matvec_omp_contig(const bool overwrite_y,
                                  const I n_row,
                                  const I Ap[],
                                  const I Aj[],
                                  const T1 Ax[],
                                  const T2 a,
                                  const T3 x[],
                                        T3 y[])
{
    const int nthread = omp_get_max_threads();
    std::vector<I> rco_vec(nthread);
    std::vector<T3> vco_vec(nthread);
    I * rco = &rco_vec[0];
    T3 * vco = &vco_vec[0];
    #pragma omp parallel shared(Ap,Aj,Ax,x,rco,vco,y) firstprivate(overwrite_y,n_row)
    {
        csrmv_merge(overwrite_y,n_row,Ap,Aj,Ax,a,x,rco,vco,y);
    }
}

// Strided OpenMP matvec via csrmv_merge_strided.
template<typename I, typename T1, typename T2,typename T3>
inline void csr_matvec_omp_strided(const bool overwrite_y,
                                   const I n_row,
                                   const I Ap[],
                                   const I Aj[],
                                   const T1 Ax[],
                                   const T2 a,
                                   const npy_intp x_stride,
                                   const T3 x[],
                                   const npy_intp y_stride,
                                         T3 y[])
{
    const int nthread = omp_get_max_threads();
    std::vector<I> rco_vec(nthread);
    std::vector<T3> vco_vec(nthread);
    I * rco = &rco_vec[0];
    T3 * vco = &vco_vec[0];
    #pragma omp parallel
    {
        csrmv_merge_strided(overwrite_y,n_row,Ap,Aj,Ax,a,x_stride,x,rco,vco,y_stride,y);
    }
}

// Multi-vector case: no parallel merge kernel available, so fall back to
// the serial implementation even when OpenMP is enabled.
template<typename I, typename T1, typename T2,typename T3>
inline void csr_matvecs_omp_strided(const bool overwrite_y,
                                    const I n_row,
                                    const npy_intp n_vecs,
                                    const I Ap[],
                                    const I Aj[],
                                    const T1 Ax[],
                                    const T2 a,
                                    const npy_intp x_stride_row,
                                    const npy_intp x_stride_col,
                                    const T3 x[],
                                    const npy_intp y_stride_row,
                                    const npy_intp y_stride_col,
                                          T3 y[])
{
    csr_matvecs_noomp_strided(overwrite_y,n_row,n_vecs,Ap,Aj,Ax,a,x_stride_row,x_stride_col,x,y_stride_row,y_stride_col,y);
}

#else

// When OpenMP is not available the *_omp entry points are aliases for the
// serial implementations, so callers can use one name unconditionally.
template<typename I, typename T1, typename T2,typename T3>
inline void csr_matvec_omp_contig(const bool overwrite_y,
                                  const I n_row,
                                  const I Ap[],
                                  const I Aj[],
                                  const T1 Ax[],
                                  const T2 a,
                                  const T3 x[],
                                        T3 y[])
{
    csr_matvec_noomp_contig(overwrite_y,n_row,Ap,Aj,Ax,a,x,y);
}

template<typename I, typename T1, typename T2,typename T3>
inline void csr_matvec_omp_strided(const bool overwrite_y,
                                   const I n_row,
                                   const I Ap[],
                                   const I Aj[],
                                   const T1 Ax[],
                                   const T2 a,
                                   const npy_intp x_stride,
                                   const T3 x[],
                                   const npy_intp y_stride,
                                         T3 y[])
{
    csr_matvec_noomp_strided(overwrite_y,n_row,Ap,Aj,Ax,a,x_stride,x,y_stride,y);
}

template<typename I, typename T1, typename T2,typename T3>
inline void csr_matvecs_omp_strided(const bool overwrite_y,
                                    const I n_row,
                                    const npy_intp n_vecs,
                                    const I Ap[],
                                    const I Aj[],
                                    const T1 Ax[],
                                    const T2 a,
                                    const npy_intp x_stride_row,
                                    const npy_intp x_stride_col,
                                    const T3 x[],
                                    const npy_intp y_stride_row,
                                    const npy_intp y_stride_col,
                                          T3 y[])
{
    csr_matvecs_noomp_strided(overwrite_y,n_row,n_vecs,Ap,Aj,Ax,a,x_stride_row,x_stride_col,x,y_stride_row,y_stride_col,y);
}

#endif
// when openmp is not being used omp and noomp versions are identical

// Public serial entry point: converts byte strides to element strides and
// dispatches to the contiguous kernel when both strides are 1.
template<typename I, typename T1, typename T2,typename T3>
void csr_matvec_noomp(const bool overwrite_y,
                      const I n_row,
                      const I n_col,
                      const I Ap[],
                      const I Aj[],
                      const T1 Ax[],
                      const T2 a,
                      const npy_intp x_stride_byte,
                      const T3 x[],
                      const npy_intp y_stride_byte,
                            T3 y[])
{
    const npy_intp y_stride = y_stride_byte/sizeof(T3);
    const npy_intp x_stride = x_stride_byte/sizeof(T3);

    if(y_stride == 1){
        if(x_stride == 1){
            csr_matvec_noomp_contig(overwrite_y,n_row,Ap,Aj,Ax,a,x,y);
        }
        else{
            csr_matvec_noomp_strided(overwrite_y,n_row,Ap,Aj,Ax,a,x_stride,x,1,y);
        }
    }
    else{
        if(x_stride == 1){
            csr_matvec_noomp_strided(overwrite_y,n_row,Ap,Aj,Ax,a,1,x,y_stride,y);
        }
        else{
            csr_matvec_noomp_strided(overwrite_y,n_row,Ap,Aj,Ax,a,x_stride,x,y_stride,y);
        }
    }
}

// Public OpenMP entry point: same stride dispatch as csr_matvec_noomp.
template<typename I, typename T1, typename T2,typename T3>
void csr_matvec_omp(const bool overwrite_y,
                    const I n_row,
                    const I n_col,
                    const I Ap[],
                    const I Aj[],
                    const T1 Ax[],
                    const T2 a,
                    const npy_intp x_stride_byte,
                    const T3 x[],
                    const npy_intp y_stride_byte,
                          T3 y[])
{
    const npy_intp y_stride = y_stride_byte/sizeof(T3);
    const npy_intp x_stride = x_stride_byte/sizeof(T3);

    if(y_stride == 1){
        if(x_stride == 1){
            csr_matvec_omp_contig(overwrite_y,n_row,Ap,Aj,Ax,a,x,y);
        }
        else{
            csr_matvec_omp_strided(overwrite_y,n_row,Ap,Aj,Ax,a,x_stride,x,1,y);
        }
    }
    else{
        if(x_stride == 1){
            csr_matvec_omp_strided(overwrite_y,n_row,Ap,Aj,Ax,a,1,x,y_stride,y);
        }
        else{
            csr_matvec_omp_strided(overwrite_y,n_row,Ap,Aj,Ax,a,x_stride,x,y_stride,y);
        }
    }
}

// Multi-vector serial entry point: converts byte strides to element strides;
// normalises unit strides so the kernel sees literal 1s where possible.
template<typename I, typename T1, typename T2,typename T3>
inline void csr_matvecs_noomp(const bool overwrite_y,
                              const I n_row,
                              const I n_col,
                              const npy_intp n_vecs,
                              const I Ap[],
                              const I Aj[],
                              const T1 Ax[],
                              const T2 a,
                              const npy_intp x_stride_row_byte,
                              const npy_intp x_stride_col_byte,
                              const T3 x[],
                              const npy_intp y_stride_row_byte,
                              const npy_intp y_stride_col_byte,
                                    T3 y[])
{
    const npy_intp y_stride_row = y_stride_row_byte/sizeof(T3);
    const npy_intp y_stride_col = y_stride_col_byte/sizeof(T3);
    const npy_intp x_stride_row = x_stride_row_byte/sizeof(T3);
    const npy_intp x_stride_col = x_stride_col_byte/sizeof(T3);

    if(y_stride_col==1){
        if(x_stride_col==1){
            csr_matvecs_noomp_strided(overwrite_y,n_row,n_vecs,Ap,Aj,Ax,a,x_stride_row,1,x,y_stride_row,1,y);
        }
        else if(x_stride_row==1){
            csr_matvecs_noomp_strided(overwrite_y,n_row,n_vecs,Ap,Aj,Ax,a,1,x_stride_col,x,y_stride_row,1,y);
        }
        else{
            csr_matvecs_noomp_strided(overwrite_y,n_row,n_vecs,Ap,Aj,Ax,a,x_stride_row,x_stride_col,x,y_stride_row,1,y);
        }
    }
    else if(y_stride_row==1){
        if(x_stride_col==1){
            csr_matvecs_noomp_strided(overwrite_y,n_row,n_vecs,Ap,Aj,Ax,a,x_stride_row,1,x,1,y_stride_col,y);
        }
        else if(x_stride_row==1){
            csr_matvecs_noomp_strided(overwrite_y,n_row,n_vecs,Ap,Aj,Ax,a,1,x_stride_col,x,1,y_stride_col,y);
        }
        else{
            csr_matvecs_noomp_strided(overwrite_y,n_row,n_vecs,Ap,Aj,Ax,a,x_stride_row,x_stride_col,x,1,y_stride_col,y);
        }
    }
    else{
        csr_matvecs_noomp_strided(overwrite_y,n_row,n_vecs,Ap,Aj,Ax,a,x_stride_row,x_stride_col,x,y_stride_row,y_stride_col,y);
    }
}

// Multi-vector OpenMP entry point: identical dispatch to csr_matvecs_noomp
// (the underlying multi-vector kernel is serial either way — see above).
template<typename I, typename T1, typename T2,typename T3>
inline void csr_matvecs_omp(const bool overwrite_y,
                            const I n_row,
                            const I n_col,
                            const npy_intp n_vecs,
                            const I Ap[],
                            const I Aj[],
                            const T1 Ax[],
                            const T2 a,
                            const npy_intp x_stride_row_byte,
                            const npy_intp x_stride_col_byte,
                            const T3 x[],
                            const npy_intp y_stride_row_byte,
                            const npy_intp y_stride_col_byte,
                                  T3 y[])
{
    const npy_intp y_stride_row = y_stride_row_byte/sizeof(T3);
    const npy_intp y_stride_col = y_stride_col_byte/sizeof(T3);
    const npy_intp x_stride_row = x_stride_row_byte/sizeof(T3);
    const npy_intp x_stride_col = x_stride_col_byte/sizeof(T3);

    if(y_stride_col==1){
        if(x_stride_col==1){
            csr_matvecs_omp_strided(overwrite_y,n_row,n_vecs,Ap,Aj,Ax,a,x_stride_row,1,x,y_stride_row,1,y);
        }
        else if(x_stride_row==1){
            csr_matvecs_omp_strided(overwrite_y,n_row,n_vecs,Ap,Aj,Ax,a,1,x_stride_col,x,y_stride_row,1,y);
        }
        else{
            csr_matvecs_omp_strided(overwrite_y,n_row,n_vecs,Ap,Aj,Ax,a,x_stride_row,x_stride_col,x,y_stride_row,1,y);
        }
    }
    else if(y_stride_row==1){
        if(x_stride_col==1){
            csr_matvecs_omp_strided(overwrite_y,n_row,n_vecs,Ap,Aj,Ax,a,x_stride_row,1,x,1,y_stride_col,y);
        }
        else if(x_stride_row==1){
            csr_matvecs_omp_strided(overwrite_y,n_row,n_vecs,Ap,Aj,Ax,a,1,x_stride_col,x,1,y_stride_col,y);
        }
        else{
            csr_matvecs_omp_strided(overwrite_y,n_row,n_vecs,Ap,Aj,Ax,a,x_stride_row,x_stride_col,x,1,y_stride_col,y);
        }
    }
    else{
        csr_matvecs_omp_strided(overwrite_y,n_row,n_vecs,Ap,Aj,Ax,a,x_stride_row,x_stride_col,x,y_stride_row,y_stride_col,y);
    }
}

#endif
HYPRE_parcsr_pcg.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_parcsr_ls.h"

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGCreate
 *
 * Builds the ParCSR-specific Krylov function table and wraps it in a
 * generic PCG solver object.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGCreate( MPI_Comm comm, HYPRE_Solver *solver )
{
   hypre_PCGFunctions *functions;

   if (!solver)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   functions =
      hypre_PCGFunctionsCreate(
         hypre_CAlloc, hypre_ParKrylovFree, hypre_ParKrylovCommInfo,
         hypre_ParKrylovCreateVector,
         hypre_ParKrylovDestroyVector, hypre_ParKrylovMatvecCreate,
         hypre_ParKrylovMatvec, hypre_ParKrylovMatvecDestroy,
         hypre_ParKrylovInnerProd, hypre_ParKrylovCopyVector,
         hypre_ParKrylovClearVector,
         hypre_ParKrylovScaleVector, hypre_ParKrylovAxpy,
         hypre_ParKrylovIdentitySetup, hypre_ParKrylovIdentity );

   *solver = (HYPRE_Solver) hypre_PCGCreate( functions );

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGDestroy — release the solver object.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGDestroy( HYPRE_Solver solver )
{
   return hypre_PCGDestroy( (void *) solver );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetup — thin cast-through to the generic PCG setup.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetup( HYPRE_Solver solver,
                      HYPRE_ParCSRMatrix A,
                      HYPRE_ParVector b,
                      HYPRE_ParVector x )
{
   return HYPRE_PCGSetup( solver,
                          (HYPRE_Matrix) A,
                          (HYPRE_Vector) b,
                          (HYPRE_Vector) x );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSolve — thin cast-through to the generic PCG solve.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSolve( HYPRE_Solver solver,
                      HYPRE_ParCSRMatrix A,
                      HYPRE_ParVector b,
                      HYPRE_ParVector x )
{
   return HYPRE_PCGSolve( solver,
                          (HYPRE_Matrix) A,
                          (HYPRE_Vector) b,
                          (HYPRE_Vector) x );
}

/*--------------------------------------------------------------------------
 * Parameter setters: each forwards directly to its HYPRE_PCG* counterpart.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetTol( HYPRE_Solver solver,
                       HYPRE_Real   tol )
{
   return HYPRE_PCGSetTol( solver, tol );
}

HYPRE_Int
HYPRE_ParCSRPCGSetAbsoluteTol( HYPRE_Solver solver,
                               HYPRE_Real   a_tol )
{
   return HYPRE_PCGSetAbsoluteTol( solver, a_tol );
}

HYPRE_Int
HYPRE_ParCSRPCGSetMaxIter( HYPRE_Solver solver,
                           HYPRE_Int    max_iter )
{
   return HYPRE_PCGSetMaxIter( solver, max_iter );
}

HYPRE_Int
HYPRE_ParCSRPCGSetStopCrit( HYPRE_Solver solver,
                            HYPRE_Int    stop_crit )
{
   return HYPRE_PCGSetStopCrit( solver, stop_crit );
}

HYPRE_Int
HYPRE_ParCSRPCGSetTwoNorm( HYPRE_Solver solver,
                           HYPRE_Int    two_norm )
{
   return HYPRE_PCGSetTwoNorm( solver, two_norm );
}

HYPRE_Int
HYPRE_ParCSRPCGSetRelChange( HYPRE_Solver solver,
                             HYPRE_Int    rel_change )
{
   return HYPRE_PCGSetRelChange( solver, rel_change );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetPrecond / HYPRE_ParCSRPCGGetPrecond
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetPrecond( HYPRE_Solver            solver,
                           HYPRE_PtrToParSolverFcn precond,
                           HYPRE_PtrToParSolverFcn precond_setup,
                           HYPRE_Solver            precond_solver )
{
   return HYPRE_PCGSetPrecond( solver,
                               (HYPRE_PtrToSolverFcn) precond,
                               (HYPRE_PtrToSolverFcn) precond_setup,
                               precond_solver );
}

HYPRE_Int
HYPRE_ParCSRPCGGetPrecond( HYPRE_Solver  solver,
                           HYPRE_Solver *precond_data_ptr )
{
   return HYPRE_PCGGetPrecond( solver, precond_data_ptr );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetPrintLevel / HYPRE_ParCSRPCGSetLogging
 * Obsolete entry points kept for compatibility; use HYPRE_PCG* instead.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetPrintLevel( HYPRE_Solver solver,
                              HYPRE_Int    level )
{
   return HYPRE_PCGSetPrintLevel( solver, level );
}

HYPRE_Int
HYPRE_ParCSRPCGSetLogging( HYPRE_Solver solver,
                           HYPRE_Int    level )
{
   return HYPRE_PCGSetLogging( solver, level );
}

/*--------------------------------------------------------------------------
 * Result accessors.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGGetNumIterations( HYPRE_Solver  solver,
                                 HYPRE_Int    *num_iterations )
{
   return HYPRE_PCGGetNumIterations( solver, num_iterations );
}

HYPRE_Int
HYPRE_ParCSRPCGGetFinalRelativeResidualNorm( HYPRE_Solver  solver,
                                             HYPRE_Real   *norm )
{
   return HYPRE_PCGGetFinalRelativeResidualNorm( solver, norm );
}

HYPRE_Int
HYPRE_ParCSRPCGGetResidual( HYPRE_Solver     solver,
                            HYPRE_ParVector *residual )
{
   return HYPRE_PCGGetResidual( solver, (void *) residual );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRDiagScaleSetup — Jacobi preconditioning needs no setup.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRDiagScaleSetup( HYPRE_Solver       solver,
                            HYPRE_ParCSRMatrix A,
                            HYPRE_ParVector    y,
                            HYPRE_ParVector    x )
{
   return 0;
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRDiagScale
 *
 * Jacobi (diagonal) preconditioner: x[i] = y[i] / diag(A)[i] on the local
 * rows.  A_i[row] indexes the diagonal entry because hypre stores it first
 * in each CSR row of the diag block.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRDiagScale( HYPRE_Solver       solver,
                       HYPRE_ParCSRMatrix HA,
                       HYPRE_ParVector    Hy,
                       HYPRE_ParVector    Hx )
{
   hypre_ParCSRMatrix *mat = (hypre_ParCSRMatrix *) HA;
   hypre_ParVector    *rhs = (hypre_ParVector *) Hy;
   hypre_ParVector    *sol = (hypre_ParVector *) Hx;

   HYPRE_Real *x_data = hypre_VectorData(hypre_ParVectorLocalVector(sol));
   HYPRE_Real *y_data = hypre_VectorData(hypre_ParVectorLocalVector(rhs));
   HYPRE_Real *A_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(mat));
   HYPRE_Int  *A_i    = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(mat));
   HYPRE_Int   local_size = hypre_VectorSize(hypre_ParVectorLocalVector(sol));

#if defined(HYPRE_USING_CUDA)
   hypreDevice_DiagScaleVector(local_size, A_i, A_data, y_data, x_data);
#else /* #if defined(HYPRE_USING_CUDA) */
   HYPRE_Int row;
#if defined(HYPRE_USING_DEVICE_OPENMP)
   #pragma omp target teams distribute parallel for private(row) is_device_ptr(x_data,y_data,A_data,A_i)
#elif defined(HYPRE_USING_OPENMP)
   #pragma omp parallel for private(row) HYPRE_SMP_SCHEDULE
#endif
   for (row = 0; row < local_size; row++)
   {
      x_data[row] = y_data[row] / A_data[A_i[row]];
   }
#endif /* #if defined(HYPRE_USING_CUDA) */

   return 0;
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRSymPrecondSetup — unfinished sketch, intentionally disabled.
 *--------------------------------------------------------------------------*/

/*

HYPRE_Int
HYPRE_ParCSRSymPrecondSetup( HYPRE_Solver solver,
                             HYPRE_ParCSRMatrix A,
                             HYPRE_ParVector b,
                             HYPRE_ParVector x )
{
   hypre_ParCSRMatrix *A = (hypre_ParCSRMatrix *) A;
   hypre_ParVector    *y = (hypre_ParVector *) b;
   hypre_ParVector    *x = (hypre_ParVector *) x;

   HYPRE_Real *x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
   HYPRE_Real *y_data = hypre_VectorData(hypre_ParVectorLocalVector(y));
   HYPRE_Real *A_diag = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A));
   HYPRE_Real *A_offd = hypre_CSRMatrixData(hypre_ParCSRMatrixOffD(A));

   HYPRE_Int i, ierr = 0;
   hypre_ParCSRMatrix *Asym;
   MPI_Comm comm;
   HYPRE_Int global_num_rows;
   HYPRE_Int global_num_cols;
   HYPRE_Int *row_starts;
   HYPRE_Int *col_starts;
   HYPRE_Int num_cols_offd;
   HYPRE_Int num_nonzeros_diag;
   HYPRE_Int num_nonzeros_offd;

   Asym = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
                                   row_starts, col_starts, num_cols_offd,
                                   num_nonzeros_diag, num_nonzeros_offd);

   for (i=0; i < hypre_VectorSize(hypre_ParVectorLocalVector(x)); i++)
   {
      x_data[i] = y_data[i]/A_data[A_i[i]];
   }

   return ierr;
}
*/
GB_subassign_10_and_18.c
//------------------------------------------------------------------------------
// GB_subassign_10_and_18: C(I,J)<M or !M,repl> = A ; using S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Method 10: C(I,J)<M,repl> = A ; using S
// Method 18: C(I,J)<!M,repl> = A ; using S

// M: present
// Mask_comp: true or false
// C_replace: true
// accum: NULL
// A: matrix
// S: constructed

// C: not bitmap: use GB_bitmap_assign instead
// M, A: any sparsity structure.

#include "GB_subassign_methods.h"

GrB_Info GB_subassign_10_and_18
(
    GrB_Matrix C,                   // matrix being assigned to (modified)
    // input:
    const GrB_Index *I,             // row index list for C(I,J)
    const int64_t ni,               // length of I
    const int64_t nI,
    const int Ikind,                // kind of I (list, range, colon, ...)
    const int64_t Icolon [3],       // start/inc/end when I is a colon expression
    const GrB_Index *J,             // column index list for C(I,J)
    const int64_t nj,               // length of J
    const int64_t nJ,
    const int Jkind,                // kind of J
    const int64_t Jcolon [3],       // start/inc/end when J is a colon expression
    const GrB_Matrix M,             // mask matrix
    const bool Mask_struct,         // if true, use only the structure of M
    const bool Mask_comp,           // if true, !M, else use M
    const GrB_Matrix A,             // right-hand-side matrix
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_BITMAP (C)) ;
    ASSERT (!GB_IS_FULL (C)) ;
    ASSERT (!GB_aliased (C, M)) ;   // NO ALIAS of C==M
    ASSERT (!GB_aliased (C, A)) ;   // NO ALIAS of C==A

    //--------------------------------------------------------------------------
    // S = C(I,J)
    //--------------------------------------------------------------------------

    GB_EMPTY_TASKLIST ;
    GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_MATRIX_WAIT_IF_JUMBLED (M) ;
    GB_MATRIX_WAIT_IF_JUMBLED (A) ;

    GB_GET_C ;      // C must not be bitmap
    GB_GET_MASK ;
    GB_GET_A ;
    GB_GET_S ;
    GrB_BinaryOp accum = NULL ;

    //--------------------------------------------------------------------------
    // Method 10: C(I,J)<M,repl> = A ; using S
    // Method 18: C(I,J)<!M,repl> = A ; using S
    //--------------------------------------------------------------------------

    // Time: Optimal.  Omega (nnz(A)+nnz(S)), since all entries in S+A must be
    // traversed, and the corresponding entry in M (even if not present)
    // determines the action to take.  M can add a log(m) factor if sparse.

    //--------------------------------------------------------------------------
    // Parallel: A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
    //--------------------------------------------------------------------------

    if (A_is_bitmap)
    {
        // all of IxJ must be examined
        GB_SUBASSIGN_IXJ_SLICE ;
    }
    else
    {
        // traverse all A+S
        GB_SUBASSIGN_TWO_SLICE (A, S) ;
    }

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    if (A_is_bitmap)
    {

        //----------------------------------------------------------------------
        // phase1: A is bitmap
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:nzombies)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t j = kfirst ; j <= klast ; j++)
            {

                //--------------------------------------------------------------
                // get S(iA_start:iA_end,j)
                //--------------------------------------------------------------

                GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
                int64_t pA_start = j * Avlen ;

                //--------------------------------------------------------------
                // get M(:,j)
                //--------------------------------------------------------------

                int64_t pM_start, pM_end ;
                GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ;
                // mjdense: M(:,j) is entirely present, so a direct lookup
                // can be used instead of a binary search
                bool mjdense = (pM_end - pM_start) == Mvlen ;

                //--------------------------------------------------------------
                // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j)
                //--------------------------------------------------------------

                for (int64_t iA = iA_start ; iA < iA_end ; iA++)
                {
                    int64_t pA = pA_start + iA ;
                    bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
                    bool Afound = Ab [pA] ;
                    if (Sfound && !Afound)
                    {
                        // S (i,j) is present but A (i,j) is not
                        // ----[C . 1] or [X . 1]-------------------------------
                        // [C . 1]: action: ( delete ): becomes zombie
                        // [X . 1]: action: ( X ): still zombie
                        // ----[C . 0] or [X . 0]-------------------------------
                        // [X . 0]: action: ( X ): still a zombie
                        // [C . 0]: C_repl: action: ( delete ): becomes zombie
                        GB_C_S_LOOKUP ;
                        GB_DELETE_ENTRY ;
                        GB_NEXT (S) ;
                    }
                    else if (!Sfound && Afound)
                    {
                        // S (i,j) is not present, A (i,j) is present
                        GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                        if (Mask_comp) mij = !mij ;
                        if (mij)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            task_pending++ ;
                        }
                    }
                    else if (Sfound && Afound)
                    {
                        // both S (i,j) and A (i,j) present
                        GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                        if (Mask_comp) mij = !mij ;
                        GB_C_S_LOOKUP ;
                        if (mij)
                        {
                            // ----[C A 1] or [X A 1]---------------------------
                            // [C A 1]: action: ( =A ): A to C no accum
                            // [X A 1]: action: ( undelete ): zombie lives
                            GB_noaccum_C_A_1_matrix ;
                        }
                        else
                        {
                            // ----[C A 0] or [X A 0]---------------------------
                            // [X A 0]: action: ( X ): still a zombie
                            // [C A 0]: C_repl: action: ( delete ): now zombie
                            GB_DELETE_ENTRY ;
                        }
                        GB_NEXT (S) ;
                    }
                }
            }
            GB_PHASE1_TASK_WRAPUP ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // phase1: A is hypersparse, sparse, or full
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:nzombies)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_TASK_DESCRIPTOR_PHASE1 ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // get A(:,j) and S(:,j)
                //--------------------------------------------------------------

                int64_t j = GBH (Zh, k) ;
                GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen);
                GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);

                //--------------------------------------------------------------
                // get M(:,j)
                //--------------------------------------------------------------

                int64_t pM_start, pM_end ;
                GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ;
                bool mjdense = (pM_end - pM_start) == Mvlen ;

                //--------------------------------------------------------------
                // do a 2-way merge of S(:,j) and A(:,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                // while both list S (:,j) and A (:,j) have entries
                while (pS < pS_end && pA < pA_end)
                {
                    int64_t iS = GBI (Si, pS, Svlen) ;
                    int64_t iA = GBI (Ai, pA, Avlen) ;

                    if (iS < iA)
                    {
                        // S (i,j) is present but A (i,j) is not
                        // ----[C . 1] or [X . 1]-------------------------------
                        // [C . 1]: action: ( delete ): becomes zombie
                        // [X . 1]: action: ( X ): still zombie
                        // ----[C . 0] or [X . 0]-------------------------------
                        // [X . 0]: action: ( X ): still a zombie
                        // [C . 0]: C_repl: action: ( delete ): becomes zombie
                        GB_C_S_LOOKUP ;
                        GB_DELETE_ENTRY ;
                        GB_NEXT (S) ;
                    }
                    else if (iA < iS)
                    {
                        // S (i,j) is not present, A (i,j) is present
                        GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                        if (Mask_comp) mij = !mij ;
                        if (mij)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            task_pending++ ;
                        }
                        GB_NEXT (A) ;
                    }
                    else
                    {
                        // both S (i,j) and A (i,j) present
                        GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                        if (Mask_comp) mij = !mij ;
                        GB_C_S_LOOKUP ;
                        if (mij)
                        {
                            // ----[C A 1] or [X A 1]---------------------------
                            // [C A 1]: action: ( =A ): A to C no accum
                            // [X A 1]: action: ( undelete ): zombie lives
                            GB_noaccum_C_A_1_matrix ;
                        }
                        else
                        {
                            // ----[C A 0] or [X A 0]---------------------------
                            // [X A 0]: action: ( X ): still a zombie
                            // [C A 0]: C_repl: action: ( delete ): now zombie
                            GB_DELETE_ENTRY ;
                        }
                        GB_NEXT (S) ;
                        GB_NEXT (A) ;
                    }
                }

                // while list S (:,j) has entries.  List A (:,j) exhausted.
                while (pS < pS_end)
                {
                    // ----[C . 1] or [X . 1]-----------------------------------
                    // S (i,j) is present but A (i,j) is not
                    // [C . 1]: action: ( delete ): becomes zombie
                    // [X . 1]: action: ( X ): still a zombie
                    GB_C_S_LOOKUP ;
                    GB_DELETE_ENTRY ;
                    GB_NEXT (S) ;
                }

                // while list A (:,j) has entries.  List S (:,j) exhausted.
                while (pA < pA_end)
                {
                    // S (i,j) is not present, A (i,j) is present
                    int64_t iA = GBI (Ai, pA, Avlen) ;
                    GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                    if (Mask_comp) mij = !mij ;
                    if (mij)
                    {
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        task_pending++ ;
                    }
                    GB_NEXT (A) ;
                }
            }

            GB_PHASE1_TASK_WRAPUP ;
        }
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    GB_PENDING_CUMSUM ;

    if (A_is_bitmap)
    {

        //----------------------------------------------------------------------
        // phase2: A is bitmap
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(&&:pending_sorted)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t j = kfirst ; j <= klast ; j++)
            {

                //--------------------------------------------------------------
                // get S(iA_start:iA_end,j)
                //--------------------------------------------------------------

                GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
                int64_t pA_start = j * Avlen ;

                //--------------------------------------------------------------
                // get M(:,j)
                //--------------------------------------------------------------

                int64_t pM_start, pM_end ;
                GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ;
                bool mjdense = (pM_end - pM_start) == Mvlen ;

                //--------------------------------------------------------------
                // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                for (int64_t iA = iA_start ; iA < iA_end ; iA++)
                {
                    int64_t pA = pA_start + iA ;
                    bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
                    bool Afound = Ab [pA] ;
                    if (!Sfound && Afound)
                    {
                        // S (i,j) is not present, A (i,j) is present
                        GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                        if (Mask_comp) mij = !mij ;
                        if (mij)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                            GB_PENDING_INSERT_aij ;
                        }
                    }
                    else if (Sfound)
                    {
                        // S (i,j) present
                        GB_NEXT (S) ;
                    }
                }
            }

            GB_PHASE2_TASK_WRAPUP ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // phase2: A is hypersparse, sparse, or full
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(&&:pending_sorted)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_TASK_DESCRIPTOR_PHASE2 ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // get A(:,j) and S(:,j)
                //--------------------------------------------------------------

                int64_t j = GBH (Zh, k) ;
                GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen);
                GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);

                //--------------------------------------------------------------
                // get M(:,j)
                //--------------------------------------------------------------

                int64_t pM_start, pM_end ;
                GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ;
                bool mjdense = (pM_end - pM_start) == Mvlen ;

                //--------------------------------------------------------------
                // do a 2-way merge of S(:,j) and A(:,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                // while both list S (:,j) and A (:,j) have entries
                while (pS < pS_end && pA < pA_end)
                {
                    int64_t iS = GBI (Si, pS, Svlen) ;
                    int64_t iA = GBI (Ai, pA, Avlen) ;

                    if (iS < iA)
                    {
                        // S (i,j) is present but A (i,j) is not
                        GB_NEXT (S) ;
                    }
                    else if (iA < iS)
                    {
                        // S (i,j) is not present, A (i,j) is present
                        GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                        if (Mask_comp) mij = !mij ;
                        if (mij)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                            GB_PENDING_INSERT_aij ;
                        }
                        GB_NEXT (A) ;
                    }
                    else
                    {
                        // both S (i,j) and A (i,j) present
                        GB_NEXT (S) ;
                        GB_NEXT (A) ;
                    }
                }

                // while list A (:,j) has entries.  List S (:,j) exhausted.
                while (pA < pA_end)
                {
                    // S (i,j) is not present, A (i,j) is present
                    int64_t iA = GBI (Ai, pA, Avlen) ;
                    GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                    if (Mask_comp) mij = !mij ;
                    if (mij)
                    {
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                        GB_PENDING_INSERT_aij ;
                    }
                    GB_NEXT (A) ;
                }
            }

            GB_PHASE2_TASK_WRAPUP ;
        }
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
test.c
#include <stdio.h>
#include <omp.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"

// enable tests
#define CHECK 1
#define PAR_A 1
#define PAR_P 1
#define PAR_1_DATA_A 1
#define PAR_T_DATA_A 1
#define PAR_TOFROM_A 1
#define PAR_TOALL_FROM_A 1

#define N 1024          /* data */
#define T 8             /* num threads */
#define M (N/T)         /* data per thread */

// Reset the test arrays before each test section.
#define INIT() INIT_LOOP(N, {A[i] = 0; C[i] = 1; D[i] = i; E[i] = -i; })

// Each test section launches T host threads; every thread offloads its own
// contiguous slice of the arrays to the device with a different combination
// of map clauses, then the result A[i] == i + 2 is verified on the host.
int main(void){
#if CHECK
  check_offloading();
#endif

  int fail;
  double A[N], B[N], C[N], D[N], E[N];
  double *pA, *pB, *pC, *pD, *pE;
  int t;

  // map ptrs
  pA = &A[0];
  pB = &B[0];
  pC = &C[0];
  pD = &D[0];
  pE = &E[0];

#if PAR_A
  // Concurrent target regions, each mapping an array section by name.
  INIT();

  // each thread compute one quarter
  #pragma omp parallel for num_threads(T) schedule(static, 1)
  for(t=0; t<T; t++) {
    int i0 = t*M;
    int i1 = (t+1)*M;
    #pragma omp target map(A[i0:M], C[i0:M], D[i0:M])
    {
      for(int i = i0; i < i1; i++) A[i] = C[i] + D[i] + 1;
    }
  }

  fail = 0;
  VERIFY(0, N, A[i], (double)(i+2));
  if (fail) {
    printf ("Test PAR_A: Failed\n");
  } else {
    printf ("Test PAR_A: Succeeded\n");
  }
#endif

#if PAR_P
  // Same computation, but the sections are mapped through pointer aliases.
  INIT();

  // each thread compute one quarter
  #pragma omp parallel for num_threads(T) schedule(static, 1)
  for(t=0; t<T; t++) {
    int i0 = t*M;
    int i1 = (t+1)*M;
    #pragma omp target map(pA[i0:M], pC[i0:M], pD[i0:M])
    {
      for(int i = i0; i < i1; i++) pA[i] = pC[i] + pD[i] + 1;
    }
  }

  fail = 0;
  VERIFY(0, N, A[i], (double)(i+2));
  if (fail) {
    printf ("Test PAR_P: Failed\n");
  } else {
    printf ("Test PAR_P: Succeeded\n");
  }
#endif

#if PAR_1_DATA_A
  // One enclosing target data region shared by all threads' target regions.
  INIT();

  // each thread compute one quarter
  #pragma omp target data map(A, B, C)
  {
    #pragma omp parallel for num_threads(T) schedule(static, 1)
    for(t=0; t<T; t++) {
      int i0 = t*M;
      int i1 = (t+1)*M;
      #pragma omp target map(A[i0:M], C[i0:M], D[i0:M])
      {
        for(int i = i0; i < i1; i++) A[i] = C[i] + D[i] + 1;
      }
    }
  }

  fail = 0;
  VERIFY(0, N, A[i], (double)(i+2));
  if (fail) {
    printf ("Test PAR_1_DATA_A: Failed\n");
  } else {
    printf ("Test PAR_1_DATA_A: Succeeded\n");
  }
#endif

#if PAR_T_DATA_A
  // Each thread opens its own target data region around its target region.
  INIT();

  // each thread compute one quarter
  #pragma omp parallel for num_threads(T) schedule(static, 1)
  for(t=0; t<T; t++) {
    int i0 = t*M;
    int i1 = (t+1)*M;
    #pragma omp target data map(A[i0:M], C[i0:M], D[i0:M])
    {
      #pragma omp target map(A[i0:M], C[i0:M], D[i0:M])
      {
        for(int i = i0; i < i1; i++) A[i] = C[i] + D[i] + 1;
      }
    }
  }

  fail = 0;
  VERIFY(0, N, A[i], (double)(i+2));
  if (fail) {
    printf ("Test PAR_T_DATA_A: Failed\n");
  } else {
    printf ("Test PAR_T_DATA_A: Succeeded\n");
  }
#endif

#if PAR_TOFROM_A
  // Directional map clauses: result section 'from', input sections 'to'.
  INIT();

  // each thread compute one quarter
  #pragma omp parallel for num_threads(T) schedule(static, 1)
  for(t=0; t<T; t++) {
    int i0 = t*M;
    int i1 = (t+1)*M;
    #pragma omp target map(from: A[i0:M]) map(to: C[i0:M], D[i0:M])
    {
      for(int i = i0; i < i1; i++) A[i] = C[i] + D[i] + 1;
    }
  }

  fail = 0;
  VERIFY(0, N, A[i], (double)(i+2));
  if (fail) {
    printf ("Test PAR_TOFROM_A: Failed\n");
  } else {
    printf ("Test PAR_TOFROM_A: Succeeded\n");
  }
#endif

#if PAR_TOALL_FROM_A
  // Whole arrays mapped 'to' by every thread; only a section mapped 'from'.
  INIT();

  // each thread compute one quarter
  // copy the whole to data... only the first one should move it
  #pragma omp parallel for num_threads(T) schedule(static, 1)
  for(t=0; t<T; t++) {
    int i0 = t*M;
    int i1 = (t+1)*M;
    #pragma omp target map(from: A[i0:M]) map(to: C, D)
    {
      for(int i = i0; i < i1; i++) A[i] = C[i] + D[i] + 1;
    }
  }

  fail = 0;
  VERIFY(0, N, A[i], (double)(i+2));
  if (fail) {
    printf ("Test PAR_TOALL_FROM_A: Failed\n");
  } else {
    printf ("Test PAR_TOALL_FROM_A: Succeeded\n");
  }
#endif

  return 0;
}
core.c
/* Main solver routines for heat equation solver */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include "heat.h" /* Update the temperature values using five-point stencil */ void evolve(field *curr, field *prev, double a, double dt) { int i, j; double dx2, dy2; /* Determine the temperature field at next time step * As we have fixed boundary conditions, the outermost gridpoints * are not updated. */ dx2 = prev->dx * prev->dx; dy2 = prev->dy * prev->dy; #pragma omp parallel for private(i, j) for (i = 1; i < curr->nx + 1; i++) { for (j = 1; j < curr->ny + 1; j++) { curr->data[i][j] = prev->data[i][j] + a * dt * ((prev->data[i + 1][j] - 2.0 * prev->data[i][j] + prev->data[i - 1][j]) / dx2 + (prev->data[i][j + 1] - 2.0 * prev->data[i][j] + prev->data[i][j - 1]) / dy2); } } }
GB_unaryop__ainv_int32_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_int32_fp32
// op(A') function:  GB_tran__ainv_int32_fp32

// C type:   int32_t
// A type:   float
// cast:     int32_t cij ; GB_CAST_SIGNED(cij,aij,32)
// unaryop:  cij = -aij

#define GB_ATYPE \
    float

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (arithmetic negation)
#define GB_OP(z, x)   \
    z = -x ;

// casting (float -> int32_t with signed saturation/wrap semantics)
#define GB_CASTING(z, x)   \
    int32_t z ; GB_CAST_SIGNED(z,x,32) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT32 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply cij = -(int32_t) aij elementwise over anz entries, using nthreads
// OpenMP threads.  Returns GrB_NO_VALUE when the operator is disabled.
GrB_Info GB_unop__ainv_int32_fp32
(
    int32_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is generated by including GB_unaryop_transpose.c
// with the macros defined above.
GrB_Info GB_tran__ainv_int32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
SVRGUpdater.h
/*
* Copyright 2016 [See AUTHORS file for list of authors]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*    http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef _SVRG_UPDATER_
#define _SVRG_UPDATER_

#include "Updater.h"
#include "../Gradient/Gradient.h"

// Updater implementing SVRG-style updates: at the start of every epoch a
// snapshot of the model (model_copy) is taken and the average gradient over
// all datapoints at that snapshot is stored in g; per-datapoint updates then
// combine the gradient at the current model with the gradient at the
// snapshot (h_x - h_y) plus the stored average (see H, Nu, Mu below).
class SVRGUpdater : public Updater {
protected:
    double n_updates_so_far;

    // Snapshot of the model taken at the beginning of each epoch.
    std::vector<double> model_copy;

    // Vectors for computing SVRG related data.
    REGISTER_THREAD_LOCAL_1D_VECTOR(lambda);
    REGISTER_THREAD_LOCAL_2D_VECTOR(h_x);
    REGISTER_THREAD_LOCAL_2D_VECTOR(h_y);
    REGISTER_GLOBAL_1D_VECTOR(g);

    // Vectors for computing the sum of gradients (g).
    REGISTER_THREAD_LOCAL_2D_VECTOR(g_kappa);
    REGISTER_THREAD_LOCAL_1D_VECTOR(g_lambda);
    REGISTER_THREAD_LOCAL_2D_VECTOR(g_h_bar);
    REGISTER_GLOBAL_1D_VECTOR(n_zeroes);

    // Cache lambda for each touched coordinate at the current model.
    void PrepareMu(std::vector<int> &coordinates) override {
        std::vector<double> &cur_model = model->ModelData();
        std::vector<double> &lambda = GET_THREAD_LOCAL_VECTOR(lambda);
        for (int i = 0; i < coordinates.size(); i++) {
            int index = coordinates[i];
            model->Lambda(index, lambda[index], cur_model);
        }
    }

    // Nu needs no per-update preparation: it reads only g, lambda and
    // model_copy, which are prepared elsewhere.
    void PrepareNu(std::vector<int> &coordinates) override {
    }

    // Compute H_bar for the datapoint's coordinates twice: h_x at the
    // current model and h_y at the epoch snapshot (model_copy).
    void PrepareH(Datapoint *datapoint, Gradient *g) override {
        std::vector<double> &cur_model = model->ModelData();
        std::vector<std::vector<double> > &h_x = GET_THREAD_LOCAL_VECTOR(h_x);
        std::vector<std::vector<double> > &h_y = GET_THREAD_LOCAL_VECTOR(h_y);
        g->datapoint = datapoint;
        model->PrecomputeCoefficients(datapoint, g, cur_model);
        int coord_size = model->CoordinateSize();
        for (int i = 0; i < datapoint->GetCoordinates().size(); i++) {
            int index = datapoint->GetCoordinates()[i];
            model->H_bar(index, h_x[index], g, cur_model);
        }
        model->PrecomputeCoefficients(datapoint, g, model_copy);
        for (int i = 0; i < datapoint->GetCoordinates().size(); i++) {
            int index = datapoint->GetCoordinates()[i];
            model->H_bar(index, h_y[index], g, model_copy);
        }
    }

    // Variance-reduced step: difference of the two H_bar evaluations,
    // scaled by the (negated) learning rate.
    double H(int coordinate, int index_into_coordinate_vector) {
        return -FLAGS_learning_rate *
            (GET_THREAD_LOCAL_VECTOR(h_x)[coordinate][index_into_coordinate_vector] -
             GET_THREAD_LOCAL_VECTOR(h_y)[coordinate][index_into_coordinate_vector]);
    }

    double Nu(int coordinate, int index_into_coordinate_vector) {
        return FLAGS_learning_rate *
            (GET_GLOBAL_VECTOR(g)[coordinate*model->CoordinateSize()+index_into_coordinate_vector] -
             GET_THREAD_LOCAL_VECTOR(lambda)[coordinate] *
             model_copy[coordinate*model->CoordinateSize()+index_into_coordinate_vector]);
    }

    double Mu(int coordinate) {
        return GET_THREAD_LOCAL_VECTOR(lambda)[coordinate] * FLAGS_learning_rate;
    }

    void ModelCopy() {
        // Make a copy of the model every epoch.
        model_copy = model->ModelData();

        // Clear the sum of gradients.
        std::vector<double> &g = GET_GLOBAL_VECTOR(g);
        std::fill(g.begin(), g.end(), 0);

        // Compute average sum of gradients on the model copy.
        std::vector<double> &n_zeroes = GET_GLOBAL_VECTOR(n_zeroes);
        int coord_size = model->CoordinateSize();

        // zero gradients.
        // Each thread writes a disjoint set of coordinates, so no locking
        // is needed here.
        #pragma omp parallel for num_threads(FLAGS_n_threads)
        for (int coordinate = 0; coordinate < model->NumParameters(); coordinate++) {
            std::vector<std::vector<double> > &g_kappa = GET_THREAD_LOCAL_VECTOR(g_kappa);
            std::vector<double> &g_lambda = GET_THREAD_LOCAL_VECTOR(g_lambda);
            model->Kappa(coordinate, g_kappa[coordinate], model_copy);
            model->Lambda(coordinate, g_lambda[coordinate], model_copy);
            for (int j = 0; j < coord_size; j++) {
                g[coordinate*coord_size+j] =
                    (g_lambda[coordinate] * model_copy[coordinate*coord_size+j] -
                     g_kappa[coordinate][j]) * n_zeroes[coordinate];
            }
        }

        // non zero gradients. Essentially do SGD here, on the same
        // partitioning pattern.  Threads are synchronized at batch
        // boundaries with the barrier below.
        #pragma omp parallel num_threads(FLAGS_n_threads)
        {
            int thread = omp_get_thread_num();
            for (int batch = 0; batch < datapoint_partitions->NumBatches(); batch++) {
                #pragma omp barrier
                for (int index = 0; index < datapoint_partitions->NumDatapointsInBatch(thread, batch); index++) {
                    Datapoint *datapoint = datapoint_partitions->GetDatapoint(thread, batch, index);
                    Gradient *grad = &thread_gradients[omp_get_thread_num()];
                    grad->datapoint = datapoint;
                    model->PrecomputeCoefficients(datapoint, grad, model_copy);
                    std::vector<std::vector<double> > &g_kappa = GET_THREAD_LOCAL_VECTOR(g_kappa);
                    std::vector<double> &g_lambda = GET_THREAD_LOCAL_VECTOR(g_lambda);
                    std::vector<std::vector<double> > &g_h_bar = GET_THREAD_LOCAL_VECTOR(g_h_bar);
                    for (auto & coord : datapoint->GetCoordinates()) {
                        model->H_bar(coord, g_h_bar[coord], grad, model_copy);
                        model->Lambda(coord, g_lambda[coord], model_copy);
                        model->Kappa(coord, g_kappa[coord], model_copy);
                    }
                    for (auto & coord : datapoint->GetCoordinates()) {
                        for (int j = 0; j < coord_size; j++) {
                            g[coord*coord_size+j] +=
                                g_lambda[coord] * model_copy[coord*coord_size+j] -
                                g_kappa[coord][j] + g_h_bar[coord][j];
                        }
                    }
                }
            }
        }

        // Normalize the accumulated sum into an average over all datapoints.
        #pragma omp parallel for num_threads(FLAGS_n_threads)
        for (int i = 0; i < model->NumParameters(); i++) {
            for (int j = 0; j < coord_size; j++) {
                g[i*coord_size+j] /= datapoints.size();
            }
        }
    }

public:
    SVRGUpdater(Model *model, std::vector<Datapoint *> &datapoints)
        : Updater(model, datapoints) {
        INITIALIZE_GLOBAL_1D_VECTOR(g, model->NumParameters() * model->CoordinateSize());
        INITIALIZE_THREAD_LOCAL_1D_VECTOR(lambda, model->NumParameters());
        INITIALIZE_THREAD_LOCAL_2D_VECTOR(h_x, model->NumParameters(), model->CoordinateSize());
        INITIALIZE_THREAD_LOCAL_2D_VECTOR(h_y, model->NumParameters(), model->CoordinateSize());
        model_copy.resize(model->ModelData().size());
        INITIALIZE_THREAD_LOCAL_2D_VECTOR(g_kappa, model->NumParameters(), model->CoordinateSize());
        INITIALIZE_THREAD_LOCAL_1D_VECTOR(g_lambda, model->NumParameters());
        INITIALIZE_THREAD_LOCAL_2D_VECTOR(g_h_bar, model->NumParameters(), model->CoordinateSize());

        // Compute number of zeroes for each column (parameters) of the model.
        // n_zeroes[i] = #datapoints that do NOT touch coordinate i.
        INITIALIZE_GLOBAL_1D_VECTOR(n_zeroes, model->NumParameters());
        std::vector<double> &n_zeroes = GET_GLOBAL_VECTOR(n_zeroes);
        for (int i = 0; i < model->NumParameters(); i++) {
            n_zeroes[i] = datapoints.size();
        }
        int sum = 0;
        for (int dp = 0; dp < datapoints.size(); dp++) {
            for (auto &coordinate : datapoints[dp]->GetCoordinates()) {
                n_zeroes[coordinate]--;
                sum++;
            }
        }
    }

    void Update(Model *model, Datapoint *datapoint) override {
        Updater::Update(model, datapoint);
    }

    // Refresh the snapshot and the average gradient at every epoch start.
    void EpochBegin() override {
        Updater::EpochBegin();
        ModelCopy();
    }

    ~SVRGUpdater() {
    }
};

#endif
GB_unop__isnan_bool_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__isnan_bool_fp64)
// op(A') function:  GB (_unop_tran__isnan_bool_fp64)

// C type:   bool
// A type:   double
// cast:     double cij = (aij)
// unaryop:  cij = isnan (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (NaN test)
#define GB_OP(z, x)   \
    z = isnan (x) ;

// casting
#define GB_CAST(z, aij)   \
    double z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    double aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = (aij) ;              \
    Cx [pC] = isnan (z) ;           \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNAN || GxB_NO_BOOL || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply Cx [p] = isnan (Ax [p]) over anz entries with nthreads OpenMP
// threads.  Ab is A->b when A is bitmap (entries with Ab [p] == 0 are
// skipped), or NULL otherwise.
GrB_Info GB (_unop_apply__isnan_bool_fp64)
(
    bool *Cx,               // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = (aij) ;
            Cx [p] = isnan (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = (aij) ;
            Cx [p] = isnan (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is generated by including GB_unop_transpose.c
// with the macros defined above.
GrB_Info GB (_unop_tran__isnan_bool_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
dem_openmp.c
/*
CS205 project: Density equalizing map projections
Date: April 6th 2019
Compiler: gcc diff_map2.c -o exec -lm -lpng
project members: Millie Zhou, Lemaire Baptiste, Benedikt Groever
project goal: density equalizing map DEM projections
Input files:
 -colchart.txt
 -density.txt
 -usa_vs.png
Output file:
 -dens_eq.png
*/

#include <math.h>
#include "dem.h"
#include "demmpi.h"
#include <omp.h>
#include <mpi.h>

#define ROUND_DOWN(x, s) ((x) & ~((s)-1))

// Number of states/entities to calculates:
#define SIZE 3142

/** Function to integrate the density and reference map fields forward in
 * time by dt.
 *
 * msteps           - number of time steps to take before printing extrema
 * size_m/size_n    - process-grid dimensions; rank_m/rank_n this rank's cell
 * x1,y1 .. x2,y2   - this rank's subimage bounds within the m x n image
 * dt, time         - step size and accumulated simulation time (updated)
 * u, cu            - density field and its per-step update buffer
 * X, cX            - reference map (2 doubles/pixel) and its update buffer
 * h, ih2           - grid spacing and 0.5/h
 */
void step(int msteps, int size_m, int size_n, int rank_m, int rank_n,
          int x1, int y1, int x2, int y2, double dt, double *time,
          double *u, double *cu, double *X, double *cX,
          double h, double ih2, int m, int n)
{
    double nu = dt/(h*h);
    double fac = ih2*dt/h;
    for(int steps = 0; steps < msteps; steps++){

        /** Calculate the upwinded update for the reference map. */
        // leqx/leqy select the upwind neighbor based on the sign of the
        // velocity; i_cond/j_cond zero the update on the image border.
        #pragma omp parallel for schedule(static) shared(fac, u, cX, X, m, n)
        for(int i=x1; i < x2; i++){
            int j;
            for(j=y1; j < y2; j++){
                double vx = 0;
                double vy = 0;
                int pos, i_cond, j_cond, leqx, leqy;
                i_cond = ((i>0)&&(i<m-1));
                j_cond = ((j>0)&&(j<n-1));
                pos = i*n+j;
                // velocity from the density gradient, scaled by 1/u
                vx = (-1.0) * (u[pos+n]-u[pos-n]) * fac / u[pos];
                vy = (-1.0) * (u[pos+1]-u[pos-1]) * fac / u[pos];
                pos = i*n*2+j*2;
                leqx = (vx > 0);
                leqy = (vy > 0);
                cX[pos+0] = i_cond * (leqx*vx*(-1*X[pos+0] + X[pos-2*n+0]) + (!leqx) * vx * (X[pos+0] - X[pos+2*n+0]));
                cX[pos+1] = i_cond * (leqx*vx*(-1*X[pos+1] + X[pos-2*n+1]) + (!leqx) * vx * (X[pos+1] - X[pos+2*n+1]));
                cX[pos+0] += j_cond * (leqy*vy*(-1*X[pos+0] + X[pos-2+0]) + (!leqy) * vy * (X[pos+0]-1*X[pos+2+0]));
                cX[pos+1] += j_cond * (leqy*vy*(-1*X[pos+1] + X[pos-2+1]) + (!leqy) * vy * (X[pos+1]-1*X[pos+2+1]));
            }
        }

        // Apply the accumulated reference-map update.
        #pragma omp parallel for schedule(static) shared(cX, X, n)
        for(int i=x1; i < x2; i++){
            for(int j=y1; j < y2; j++){
                X[i*n*2+j*2+0] += cX[i*n*2+j*2+0];
                X[i*n*2+j*2+1] += cX[i*n*2+j*2+1];
            }
        }

        /* MPI updating neighbour pixels */
        ghost_exchange_X(size_m, size_n, rank_m, rank_n, X, x1, y1, x2, y2, m, n);

        /* Do the finite-difference update */
        // k counts how many of the four neighbors exist, so border pixels
        // use the correct (reduced) Laplacian.
        #pragma omp parallel for schedule(static) shared(cu, u, m, n)
        for (int i=x1; i<x2; i++) {
            for (int j=y1; j<y2; j++) {
                double tem;
                int k;
                tem = (i>0)*u[(i-1)*n+j] + (j>0)*u[i*n+(j-1)]
                    + (j<n-1)*u[i*n+(j+1)] + (i<m-1)*u[(i+1)*n+j];
                k = (i>0) + (j>0) + (j<n-1) + (i<m-1);
                cu[i*n+j] = tem - k * u[i*n+j];
            }
        }

        // Diffuse the density field by the accumulated update.
        #pragma omp parallel for schedule(static) shared(cu, u, nu, n)
        for(int i=x1; i < x2; i++){
            for(int j=y1; j < y2; j++){
                u[i*n+j] += cu[i*n+j] * nu;
            }
        }

        /* MPI updating neighbour pixels */
        ghost_exchange_u(size_m,size_n,rank_m,rank_n,u,x1,y1,x2,y2,m,n);

        /* Print the current time and the extremal values of density */
        *time += dt;
    }
    print_max_min(size_m,size_n,rank_m,rank_n,u,time,x1,y1,x2,y2,m,n);
}

/* Main program for density equalizing map projections. */
int main(int argc, char *argv[])
{
    /* Initialize MPI */
    // MPI_THREAD_FUNNELED: only the main thread of each rank makes MPI
    // calls; OpenMP is used inside step() between exchanges.
    int rank, size, provided;
    MPI_Init_thread(&argc,&argv, MPI_THREAD_FUNNELED, &provided);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    double t1, t2, t3, t4;
    t1 = MPI_Wtime();

    /* Read in the undeformed US map. */
    int m, n;
    int *o;
    o = read_map(argv[1], &m, &n);

    /* Get subimage boundaries of process and print diagnostic MPI messages */
    int x1, y1, x2, y2;
    setup_mpi(rank, &size, &x1, &y1, &x2, &y2, m, n);
    if(rank==0){printf("Image size is (%d,%d)\n", m, n);}
    int nthreads = omp_get_max_threads();
    MPI_Barrier(MPI_COMM_WORLD);
    printf("MPI rank %d has %d omp processes\n", rank, nthreads);
    MPI_Barrier(MPI_COMM_WORLD);
    int rank_m, rank_n, size_m, size_n;
    get_position(size, rank, &rank_m, &rank_n, &size_m, &size_n);

    /* Get density data from quantity of interest data and color bar codes */
    double *u  = (double*)malloc(m*n * sizeof(double));
    double *cu = (double*)malloc(m*n * sizeof(double));
    image_to_density_map(o, u, argv[2], argv[3], SIZE, rank);

    /** Grid spacing. */
    double h = 1.00;
    double ih2 = 0.5/h;

    /** Initialize the reference map coordinates. */
    // X stores (x,y) per pixel, interleaved: X[2*(i*n+j)] and +1.
    double *X  = (double*)malloc(m*n*2 * sizeof(double));
    double *cX = (double*)malloc(m*n*2 * sizeof(double));
    for(int i=0; i < m; i++){
        for(int j=0; j < n; j++){
            X[i*n*2+j*2+0] = h*i;
            X[i*n*2+j*2+1] = h*j;
        }
    }

    /* Calculate timestep size. */
    double dt = 0.24*h*h;
    double T = (m*m+n*n)/12.0;
    int nsteps = (int) ceil(T/dt);
    dt = T/nsteps;
    if(rank==0){
        printf("Solving to T= %10f using %d timesteps.\n", T, nsteps);
    }
    t2 = MPI_Wtime();

    /* Perform the integration timesteps, using the smaller dt for the first
       few steps to deal with the large velocities that initially occur. */
    double time = 0;
    int l;
    for(l=0; l < 24; l++){
        step(1, size_m, size_n, rank_m, rank_n, x1, y1, x2, y2, dt/24.0, &time,
             u, cu, X, cX, h, ih2, m, n);
    }
    // Remaining steps in chunks of 1000 (extrema are printed per chunk).
    for(l=0; l < (int)((nsteps-1)/1000); l++){
        step(1000, size_m, size_n, rank_m, rank_n, x1, y1, x2, y2, dt, &time,
             u, cu, X, cX, h, ih2, m, n);
    }
    step(nsteps-l*1000-1, size_m, size_n, rank_m, rank_n, x1, y1, x2, y2, dt, &time,
         u, cu, X, cX, h, ih2, m, n);
    t3 = MPI_Wtime();

    /* worker node send reference map to master which saves the png */
    send_receive_save(rank, size, o, X, x1, y1, x2, y2, m, n, argv[1], argv[4]);
    //print_max_min(size_m,size_n,rank_m,rank_n,u,&time,x1,y1,x2,y2,m,n);
    t4 = MPI_Wtime();

    if(rank == 0){
        printf("Loading and preprocessing data: %f s\n", t2-t1);
        printf("Processing (DEM method): %f s\n", t3-t2);
        printf("Postprocessing and saving data : %f s\n", t4-t3);
    }

    MPI_Finalize();
    free(u);
    free(cu);
    free(X);
    free(cX);
    return 0;
}
GB_binop__bset_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bset_int32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__bset_int32) // A.*B function (eWiseMult): GB (_AemultB_03__bset_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_int32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((node)) // C+=B function (dense accum): GB (_Cdense_accumB__bset_int32) // C+=b function (dense accum): GB (_Cdense_accumb__bset_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_int32) // C=scalar+B GB (_bind1st__bset_int32) // C=scalar+B' GB (_bind1st_tran__bset_int32) // C=A+scalar GB (_bind2nd__bset_int32) // C=A'+scalar GB (_bind2nd_tran__bset_int32) // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = GB_BITSET (aij, bij, int32_t, 32) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij 
= Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_BITSET (x, y, int32_t, 32) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSET || GxB_NO_INT32 || GxB_NO_BSET_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bset_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bset_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bset_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((node)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bset_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bset_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict 
C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bset_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__bset_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bset_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bset_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = GB_BITSET (x, bij, int32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bset_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = GB_BITSET (aij, y, int32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = GB_BITSET (x, aij, int32_t, 32) ; \ } GrB_Info GB (_bind1st_tran__bset_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = GB_BITSET (aij, y, int32_t, 32) ; \ } GrB_Info GB (_bind2nd_tran__bset_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif